From cdf752a47700cbfc0d19cfdc25eb001f51fae19d Mon Sep 17 00:00:00 2001
From: Cunliang Geng
Date: Fri, 12 Jul 2024 10:06:20 +0100
Subject: [PATCH] Deployed 2025abd to 2.0.0a3 with MkDocs 1.6.0 and mike 2.1.1

---
 2.0.0a3/404.html                              | 1414 ++++
 2.0.0a3/api/antismash/index.html              | 3528 +++++++++
 2.0.0a3/api/arranger/index.html               | 3457 +++++++++
 2.0.0a3/api/bigscape/index.html               | 2440 ++++++
 2.0.0a3/api/genomics/index.html               | 3357 +++++++++
 2.0.0a3/api/genomics_abc/index.html           | 2088 +++++
 2.0.0a3/api/genomics_utils/index.html         | 3056 ++++++++
 2.0.0a3/api/gnps/index.html                   | 4595 +++++++++++
 2.0.0a3/api/loader/index.html                 | 2537 +++++++
 2.0.0a3/api/metabolomics/index.html           | 3161 ++++++++
 2.0.0a3/api/metabolomics_abc/index.html       | 2151 ++++++
 2.0.0a3/api/metabolomics_utils/index.html     | 2637 +++++++
 2.0.0a3/api/mibig/index.html                  | 2774 +++++++
 2.0.0a3/api/nplinker/index.html               | 4584 +++++++++++
 2.0.0a3/api/schema/index.html                 | 1924 +++++
 2.0.0a3/api/scoring/index.html                | 2406 ++++++
 2.0.0a3/api/scoring_abc/index.html            | 2073 +++++
 2.0.0a3/api/scoring_methods/index.html        | 2974 ++++++++
 2.0.0a3/api/scoring_utils/index.html          | 1801 +++++
 2.0.0a3/api/strain/index.html                 | 3039 ++++++++
 2.0.0a3/api/strain_utils/index.html           | 2119 ++++++
 2.0.0a3/api/utils/index.html                  | 3271 ++++++++
 2.0.0a3/assets/_mkdocstrings.css              |  119 +
 2.0.0a3/assets/images/favicon.png             |  Bin 0 -> 1870 bytes
 .../assets/javascripts/bundle.081f42fc.min.js |   29 +
 .../javascripts/bundle.081f42fc.min.js.map    |    7 +
 .../javascripts/lunr/min/lunr.ar.min.js       |    1 +
 .../javascripts/lunr/min/lunr.da.min.js       |   18 +
 .../javascripts/lunr/min/lunr.de.min.js       |   18 +
 .../javascripts/lunr/min/lunr.du.min.js       |   18 +
 .../javascripts/lunr/min/lunr.el.min.js       |    1 +
 .../javascripts/lunr/min/lunr.es.min.js       |   18 +
 .../javascripts/lunr/min/lunr.fi.min.js       |   18 +
 .../javascripts/lunr/min/lunr.fr.min.js       |   18 +
 .../javascripts/lunr/min/lunr.he.min.js       |    1 +
 .../javascripts/lunr/min/lunr.hi.min.js       |    1 +
 .../javascripts/lunr/min/lunr.hu.min.js       |   18 +
 .../javascripts/lunr/min/lunr.hy.min.js       |    1 +
 .../javascripts/lunr/min/lunr.it.min.js       |   18 +
 .../javascripts/lunr/min/lunr.ja.min.js       |    1 +
 .../javascripts/lunr/min/lunr.jp.min.js       |    1 +
 .../javascripts/lunr/min/lunr.kn.min.js       |    1 +
 .../javascripts/lunr/min/lunr.ko.min.js       |    1 +
 .../javascripts/lunr/min/lunr.multi.min.js    |    1 +
 .../javascripts/lunr/min/lunr.nl.min.js       |   18 +
 .../javascripts/lunr/min/lunr.no.min.js       |   18 +
 .../javascripts/lunr/min/lunr.pt.min.js       |   18 +
 .../javascripts/lunr/min/lunr.ro.min.js       |   18 +
 .../javascripts/lunr/min/lunr.ru.min.js       |   18 +
 .../javascripts/lunr/min/lunr.sa.min.js       |    1 +
 .../lunr/min/lunr.stemmer.support.min.js      |    1 +
 .../javascripts/lunr/min/lunr.sv.min.js       |   18 +
 .../javascripts/lunr/min/lunr.ta.min.js       |    1 +
 .../javascripts/lunr/min/lunr.te.min.js       |    1 +
 .../javascripts/lunr/min/lunr.th.min.js       |    1 +
 .../javascripts/lunr/min/lunr.tr.min.js       |   18 +
 .../javascripts/lunr/min/lunr.vi.min.js       |    1 +
 .../javascripts/lunr/min/lunr.zh.min.js       |    1 +
 2.0.0a3/assets/javascripts/lunr/tinyseg.js    |  206 +
 2.0.0a3/assets/javascripts/lunr/wordcut.js    | 6708 +++++++++++++++++
 .../workers/search.b8dbb3d2.min.js            |   42 +
 .../workers/search.b8dbb3d2.min.js.map        |    7 +
 .../assets/stylesheets/main.6543a935.min.css  |    1 +
 .../stylesheets/main.6543a935.min.css.map     |    1 +
 .../stylesheets/palette.06af60db.min.css      |    1 +
 .../stylesheets/palette.06af60db.min.css.map  |    1 +
 2.0.0a3/concepts/bigscape/index.html          | 1470 ++++
 2.0.0a3/concepts/config_file/index.html       | 1645 ++++
 2.0.0a3/concepts/gnps_data/index.html         | 1614 ++++
 .../concepts/working_dir_structure/index.html | 1548 ++++
 2.0.0a3/diagrams/arranger/index.html          | 1636 ++++
 2.0.0a3/diagrams/loader/index.html            | 1473 ++++
 2.0.0a3/figure/data_loading_pipeline.svg      |    4 +
 2.0.0a3/index.html                            | 1467 ++++
 2.0.0a3/install/index.html                    | 1548 ++++
 2.0.0a3/logging/index.html                    | 1575 ++++
 2.0.0a3/objects.inv                           |  Bin 0 -> 4746 bytes
 2.0.0a3/quickstart/index.html                 | 1861 +++++
 2.0.0a3/search/search_index.json              |    1 +
 2.0.0a3/sitemap.xml                           |  158 +
 2.0.0a3/sitemap.xml.gz                        |  Bin 0 -> 416 bytes
 latest                                        |    2 +-
 versions.json                                 |    5 +
 83 files changed, 84801 insertions(+), 1 deletion(-)
 create mode 100644 2.0.0a3/404.html
 create mode 100644 2.0.0a3/api/antismash/index.html
 create mode 100644 2.0.0a3/api/arranger/index.html
 create mode 100644 2.0.0a3/api/bigscape/index.html
 create mode 100644 2.0.0a3/api/genomics/index.html
 create mode 100644 2.0.0a3/api/genomics_abc/index.html
 create mode 100644 2.0.0a3/api/genomics_utils/index.html
 create mode 100644 2.0.0a3/api/gnps/index.html
 create mode 100644 2.0.0a3/api/loader/index.html
 create mode 100644 2.0.0a3/api/metabolomics/index.html
 create mode 100644 2.0.0a3/api/metabolomics_abc/index.html
 create mode 100644 2.0.0a3/api/metabolomics_utils/index.html
 create mode 100644 2.0.0a3/api/mibig/index.html
 create mode 100644 2.0.0a3/api/nplinker/index.html
 create mode 100644 2.0.0a3/api/schema/index.html
 create mode 100644 2.0.0a3/api/scoring/index.html
 create mode 100644 2.0.0a3/api/scoring_abc/index.html
 create mode 100644 2.0.0a3/api/scoring_methods/index.html
 create mode 100644 2.0.0a3/api/scoring_utils/index.html
 create mode 100644 2.0.0a3/api/strain/index.html
 create mode 100644 2.0.0a3/api/strain_utils/index.html
 create mode 100644 2.0.0a3/api/utils/index.html
 create mode 100644 2.0.0a3/assets/_mkdocstrings.css
 create mode 100644 2.0.0a3/assets/images/favicon.png
 create mode 100644 2.0.0a3/assets/javascripts/bundle.081f42fc.min.js
 create mode 100644 2.0.0a3/assets/javascripts/bundle.081f42fc.min.js.map
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.ar.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.da.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.de.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.du.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.el.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.es.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.fi.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.fr.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.he.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.hi.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.hu.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.hy.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.it.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.ja.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.jp.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.kn.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.ko.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.multi.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.nl.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.no.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.pt.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.ro.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.ru.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.sa.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.stemmer.support.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.sv.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.ta.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.te.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.th.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.tr.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.vi.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/min/lunr.zh.min.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/tinyseg.js
 create mode 100644 2.0.0a3/assets/javascripts/lunr/wordcut.js
 create mode 100644 2.0.0a3/assets/javascripts/workers/search.b8dbb3d2.min.js
 create mode 100644 2.0.0a3/assets/javascripts/workers/search.b8dbb3d2.min.js.map
 create mode 100644 2.0.0a3/assets/stylesheets/main.6543a935.min.css
 create mode 100644 2.0.0a3/assets/stylesheets/main.6543a935.min.css.map
 create mode 100644 2.0.0a3/assets/stylesheets/palette.06af60db.min.css
 create mode 100644 2.0.0a3/assets/stylesheets/palette.06af60db.min.css.map
 create mode 100644 2.0.0a3/concepts/bigscape/index.html
 create mode 100644 2.0.0a3/concepts/config_file/index.html
 create mode 100644 2.0.0a3/concepts/gnps_data/index.html
 create mode 100644 2.0.0a3/concepts/working_dir_structure/index.html
 create mode 100644 2.0.0a3/diagrams/arranger/index.html
 create mode 100644 2.0.0a3/diagrams/loader/index.html
 create mode 100644 2.0.0a3/figure/data_loading_pipeline.svg
 create mode 100644 2.0.0a3/index.html
 create mode 100644 2.0.0a3/install/index.html
 create mode 100644 2.0.0a3/logging/index.html
 create mode 100644 2.0.0a3/objects.inv
 create mode 100644 2.0.0a3/quickstart/index.html
 create mode 100644 2.0.0a3/search/search_index.json
 create mode 100644 2.0.0a3/sitemap.xml
 create mode 100644 2.0.0a3/sitemap.xml.gz

diff --git a/2.0.0a3/404.html b/2.0.0a3/404.html
new file mode 100644
index 00000000..59a14c3b
--- /dev/null
+++ b/2.0.0a3/404.html
@@ -0,0 +1,1414 @@

NPLinker
404 - Not found
\ No newline at end of file
diff --git a/2.0.0a3/api/antismash/index.html b/2.0.0a3/api/antismash/index.html
new file mode 100644
index 00000000..5342924c
--- /dev/null
+++ b/2.0.0a3/api/antismash/index.html
@@ -0,0 +1,3528 @@

AntiSMASH - NPLinker
AntiSMASH

antismash

AntismashBGCLoader

    AntismashBGCLoader(data_dir: str | PathLike)

Bases: BGCLoaderBase

Build a loader for AntiSMASH BGC genbank (.gbk) files.

Note: AntiSMASH BGC directory must follow the structure below:

    antismash
        ├── genome_id_1 (one AntiSMASH output, e.g. GCF_000514775.1)
        │  ├── GCF_000514775.1.gbk
        │  ├── NZ_AZWO01000004.region001.gbk
        │  └── ...
        ├── genome_id_2
        │  ├── ...
        └── ...

Parameters:
    data_dir (str | PathLike): Path to AntiSMASH directory that contains a
        collection of AntiSMASH outputs. [required]

Source code in src/nplinker/genomics/antismash/antismash_loader.py
def __init__(self, data_dir: str | PathLike) -> None:
+    """Initialize the AntiSMASH BGC loader.
+
+    Args:
+        data_dir: Path to AntiSMASH directory that contains a
+            collection of AntiSMASH outputs.
+    """
+    self.data_dir = str(data_dir)
+    self._file_dict = self._parse_data_dir(self.data_dir)
data_dir (instance-attribute)

    data_dir = str(data_dir)

get_bgc_genome_mapping

    get_bgc_genome_mapping() -> dict[str, str]

Get the mapping from BGC to genome.

Note that the directory name of the gbk file is treated as genome id.

Returns:
    dict[str, str]: The key is BGC name (gbk file name) and value is genome id
        (the directory name of the gbk file).

Source code in src/nplinker/genomics/antismash/antismash_loader.py
def get_bgc_genome_mapping(self) -> dict[str, str]:
+    """Get the mapping from BGC to genome.
+
+    Note that the directory name of the gbk file is treated as genome id.
+
+    Returns:
+        The key is BGC name (gbk file name) and value is genome id (the directory name of the
+        gbk file).
+    """
+    return {
+        bid: os.path.basename(os.path.dirname(bpath)) for bid, bpath in self._file_dict.items()
+    }
get_files

    get_files() -> dict[str, str]

Get BGC gbk files.

Returns:
    dict[str, str]: The key is BGC name (gbk file name) and value is path to the gbk file.

Source code in src/nplinker/genomics/antismash/antismash_loader.py
def get_files(self) -> dict[str, str]:
+    """Get BGC gbk files.
+
+    Returns:
+        The key is BGC name (gbk file name) and value is path to the gbk file.
+    """
+    return self._file_dict
get_bgcs

    get_bgcs() -> list[BGC]

Get all BGC objects.

Returns:
    list[BGC]: A list of BGC objects.

Source code in src/nplinker/genomics/antismash/antismash_loader.py
def get_bgcs(self) -> list[BGC]:
+    """Get all BGC objects.
+
+    Returns:
+        A list of BGC objects
+    """
+    return self._bgcs
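
Taken together, the loader's getters support a short end-to-end sketch. The data
directory path below is hypothetical, and the import path is an assumption based on
the source location src/nplinker/genomics/antismash/:

    from nplinker.genomics.antismash import AntismashBGCLoader

    # One sub-directory of antiSMASH output per genome, as in the class-level note
    loader = AntismashBGCLoader("/data/antismash")

    bgcs = loader.get_bgcs()                   # list of BGC objects
    mapping = loader.get_bgc_genome_mapping()  # BGC name -> genome id
    print(f"{len(bgcs)} BGCs from {len(set(mapping.values()))} genomes")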

GenomeStatus

    GenomeStatus(
        original_id: str,
        resolved_refseq_id: str = "",
        resolve_attempted: bool = False,
        bgc_path: str = "",
    )

A class to represent the status of a single genome.

The status of genomes is tracked in a JSON file which has a name defined
in the variable GENOME_STATUS_FILENAME.

Parameters:
    original_id (str): The original ID of the genome. [required]
    resolved_refseq_id (str): The resolved RefSeq ID of the genome. Defaults to "".
    resolve_attempted (bool): A flag indicating whether an attempt to resolve the
        RefSeq ID has been made. Defaults to False.
    bgc_path (str): The path to the downloaded BGC file for the genome. Defaults to "".

Source code in src/nplinker/genomics/antismash/podp_antismash_downloader.py
def __init__(
+    self,
+    original_id: str,
+    resolved_refseq_id: str = "",
+    resolve_attempted: bool = False,
+    bgc_path: str = "",
+):
+    """Initialize a GenomeStatus object for the given genome.
+
+    Args:
+        original_id: The original ID of the genome.
+        resolved_refseq_id: The resolved RefSeq ID of the
+            genome. Defaults to "".
+        resolve_attempted: A flag indicating whether an
+            attempt to resolve the RefSeq ID has been made. Defaults to False.
+        bgc_path: The path to the downloaded BGC file for
+            the genome. Defaults to "".
+    """
+    self.original_id = original_id
+    self.resolved_refseq_id = "" if resolved_refseq_id == "None" else resolved_refseq_id
+    self.resolve_attempted = resolve_attempted
+    self.bgc_path = bgc_path
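
One behavior of the constructor above worth noting: a resolved_refseq_id given as the
literal string "None" is coerced to an empty string, e.g.

    gs = GenomeStatus("GCF_000514775.1", resolved_refseq_id="None")
    assert gs.resolved_refseq_id == ""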
original_id (instance-attribute)

    original_id = original_id

resolved_refseq_id (instance-attribute)

    resolved_refseq_id = (
        "" if resolved_refseq_id == "None" else resolved_refseq_id
    )

resolve_attempted (instance-attribute)

    resolve_attempted = resolve_attempted

bgc_path (instance-attribute)

    bgc_path = bgc_path

read_json (staticmethod)

    read_json(file: str | PathLike) -> dict[str, "GenomeStatus"]

Get a dict of GenomeStatus objects by loading the given genome status file.

Note that an empty dict is returned if the given file doesn't exist.

Parameters:
    file (str | PathLike): Path to genome status file. [required]

Returns:
    dict[str, "GenomeStatus"]: Dict keys are genome original id and values are
        GenomeStatus objects. An empty dict is returned if the given file doesn't exist.

Source code in src/nplinker/genomics/antismash/podp_antismash_downloader.py
@staticmethod
+def read_json(file: str | PathLike) -> dict[str, "GenomeStatus"]:
+    """Get a dict of GenomeStatus objects by loading given genome status file.
+
+    Note that an empty dict is returned if the given file doesn't exist.
+
+    Args:
+        file: Path to genome status file.
+
+    Returns:
+        Dict keys are genome original id and values are GenomeStatus
+            objects. An empty dict is returned if the given file doesn't exist.
+    """
+    genome_status_dict = {}
+    if Path(file).exists():
+        with open(file, "r") as f:
+            data = json.load(f)
+
+        # validate json data before using it
+        validate(data, schema=GENOME_STATUS_SCHEMA)
+
+        genome_status_dict = {
+            gs["original_id"]: GenomeStatus(**gs) for gs in data["genome_status"]
+        }
+    return genome_status_dict
to_json (staticmethod)

    to_json(
        genome_status_dict: Mapping[str, "GenomeStatus"],
        file: str | PathLike | None = None,
    ) -> str | None

Convert the genome status dictionary to a JSON string.

If a file path is provided, the JSON string is written to the file. If
the file already exists, it is overwritten.

Parameters:
    genome_status_dict (Mapping[str, "GenomeStatus"]): A dictionary of genome
        status objects. The keys are the original genome IDs and the values
        are GenomeStatus objects. [required]
    file (str | PathLike | None): The path to the output JSON file. If None,
        the JSON string is returned but not written to a file. Defaults to None.

Returns:
    str | None: The JSON string if `file` is None, otherwise None.

Source code in src/nplinker/genomics/antismash/podp_antismash_downloader.py
@staticmethod
+def to_json(
+    genome_status_dict: Mapping[str, "GenomeStatus"], file: str | PathLike | None = None
+) -> str | None:
+    """Convert the genome status dictionary to a JSON string.
+
+    If a file path is provided, the JSON string is written to the file. If
+    the file already exists, it is overwritten.
+
+    Args:
+        genome_status_dict: A dictionary of genome
+            status objects. The keys are the original genome IDs and the values
+            are GenomeStatus objects.
+        file: The path to the output JSON file.
+            If None, the JSON string is returned but not written to a file.
+
+    Returns:
+        The JSON string if `file` is None, otherwise None.
+    """
+    gs_list = [gs._to_dict() for gs in genome_status_dict.values()]
+    json_data = {"genome_status": gs_list, "version": "1.0"}
+
+    # validate json object before dumping
+    validate(json_data, schema=GENOME_STATUS_SCHEMA)
+
+    if file is not None:
+        with open(file, "w") as f:
+            json.dump(json_data, f)
+        return None
+    return json.dumps(json_data)
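
A minimal round-trip sketch of the two static methods above (the file name is
hypothetical; both methods validate the data against GENOME_STATUS_SCHEMA internally):

    gs_file = "genome_status.json"

    gs_dict = GenomeStatus.read_json(gs_file)  # {} if the file doesn't exist yet
    gs_dict.setdefault("GCF_000514775.1", GenomeStatus("GCF_000514775.1"))

    GenomeStatus.to_json(gs_dict, gs_file)     # writes (and overwrites) the file
    json_str = GenomeStatus.to_json(gs_dict)   # with file=None, returns the JSON string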

download_and_extract_antismash_data

    download_and_extract_antismash_data(
        antismash_id: str,
        download_root: str | PathLike,
        extract_root: str | PathLike,
    ) -> None

Download and extract antiSMASH BGC archive for a specified genome.

The antiSMASH database (https://antismash-db.secondarymetabolites.org/) is used to
download the BGC archive, and antiSMASH uses the RefSeq assembly id of a genome as
the id of the archive.

Parameters:
    antismash_id (str): The id used to download BGC archive from antiSMASH database.
        If the id is versioned (e.g., "GCF_004339725.1") please be sure to
        specify the version as well. [required]
    download_root (str | PathLike): Path to the directory to place downloaded
        archive in. [required]
    extract_root (str | PathLike): Path to the directory data files will be extracted
        to. Note that an `antismash` directory will be created in the specified
        `extract_root` if it doesn't exist. The files will be extracted to the
        `<extract_root>/antismash/<antismash_id>` directory. [required]

Raises:
    ValueError: if the `<extract_root>/antismash/<refseq_assembly_id>` dir is not empty.

Examples:
    >>> download_and_extract_antismash_data("GCF_004339725.1", "/data/download", "/data/extracted")

Source code in src/nplinker/genomics/antismash/antismash_downloader.py
def download_and_extract_antismash_data(
+    antismash_id: str, download_root: str | PathLike, extract_root: str | PathLike
+) -> None:
+    """Download and extract antiSMASH BGC archive for a specified genome.
+
+    The antiSMASH database (https://antismash-db.secondarymetabolites.org/)
+    is used to download the BGC archive. And antiSMASH use RefSeq assembly id
+    of a genome as the id of the archive.
+
+    Args:
+        antismash_id: The id used to download BGC archive from antiSMASH database.
+            If the id is versioned (e.g., "GCF_004339725.1") please be sure to
+            specify the version as well.
+        download_root: Path to the directory to place downloaded archive in.
+        extract_root: Path to the directory data files will be extracted to.
+            Note that an `antismash` directory will be created in the specified `extract_root` if
+            it doesn't exist. The files will be extracted to `<extract_root>/antismash/<antismash_id>` directory.
+
+    Raises:
+        ValueError: if `<extract_root>/antismash/<refseq_assembly_id>` dir is not empty.
+
+    Examples:
+        >>> download_and_extract_antismash_data("GCF_004339725.1", "/data/download", "/data/extracted")
+    """
+    download_root = Path(download_root)
+    extract_root = Path(extract_root)
+    extract_path = extract_root / "antismash" / antismash_id
+
+    try:
+        if extract_path.exists():
+            _check_extract_path(extract_path)
+        else:
+            extract_path.mkdir(parents=True, exist_ok=True)
+
+        for base_url in [ANTISMASH_DB_DOWNLOAD_URL, ANTISMASH_DBV2_DOWNLOAD_URL]:
+            url = base_url.format(antismash_id, antismash_id + ".zip")
+            download_and_extract_archive(url, download_root, extract_path, antismash_id + ".zip")
+            break
+
+        # delete subdirs
+        for subdir_path in list_dirs(extract_path):
+            shutil.rmtree(subdir_path)
+
+        # delete unnecessary files
+        files_to_keep = list_files(extract_path, suffix=(".json", ".gbk"))
+        for file in list_files(extract_path):
+            if file not in files_to_keep:
+                os.remove(file)
+
+        logger.info("antiSMASH BGC data of %s is downloaded and extracted.", antismash_id)
+
+    except Exception as e:
+        shutil.rmtree(extract_path)
+        logger.warning(e)
+        raise e

parse_bgc_genbank

    parse_bgc_genbank(file: str | PathLike) -> BGC

Parse a single BGC gbk file to BGC object.

Parameters:
    file (str | PathLike): Path to BGC gbk file. [required]

Returns:
    BGC: BGC object.

Examples:
    >>> bgc = parse_bgc_genbank(
    ...    "/data/antismash/GCF_000016425.1/NC_009380.1.region001.gbk")

Source code in src/nplinker/genomics/antismash/antismash_loader.py
def parse_bgc_genbank(file: str | PathLike) -> BGC:
+    """Parse a single BGC gbk file to BGC object.
+
+    Args:
+        file: Path to BGC gbk file
+
+    Returns:
+        BGC object
+
+    Examples:
+        >>> bgc = parse_bgc_genbank(
+        ...    "/data/antismash/GCF_000016425.1/NC_009380.1.region001.gbk")
+    """
+    file = Path(file)
+    fname = file.stem
+
+    record = SeqIO.read(file, format="genbank")
+    description = record.description  # "DEFINITION" in gbk file
+    antismash_id = record.id  # "VERSION" in gbk file
+    features = _parse_antismash_genbank(record)
+    product_prediction = features.get("product")
+    if product_prediction is None:
+        raise ValueError(f"Not found product prediction in antiSMASH Genbank file {file}")
+
+    # init BGC
+    bgc = BGC(fname, *product_prediction)
+    bgc.description = description
+    bgc.antismash_id = antismash_id
+    bgc.antismash_file = str(file)
+    bgc.antismash_region = features.get("region_number")
+    bgc.smiles = features.get("smiles")
+    bgc.strain = Strain(fname)
+    return bgc
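
The returned BGC carries the fields assigned in the function body above, so a caller
can inspect them directly (the path is hypothetical):

    bgc = parse_bgc_genbank("/data/antismash/GCF_000016425.1/NC_009380.1.region001.gbk")
    print(bgc.antismash_id, bgc.antismash_region)  # record VERSION and region number
    print(bgc.description, bgc.smiles)             # record DEFINITION and SMILES, if any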

get_best_available_genome_id

    get_best_available_genome_id(
        genome_id_data: Mapping[str, str]
    ) -> str | None

Get the best available ID from genome_id_data dict.

Parameters:
    genome_id_data (Mapping[str, str]): dictionary containing information for each
        genome record present. [required]

Returns:
    str | None: ID for the genome, if present, otherwise None.

Source code in src/nplinker/genomics/antismash/podp_antismash_downloader.py
def get_best_available_genome_id(genome_id_data: Mapping[str, str]) -> str | None:
+    """Get the best available ID from genome_id_data dict.
+
+    Args:
+        genome_id_data: dictionary containing information for each genome record present.
+
+    Returns:
+        ID for the genome, if present, otherwise None.
+    """
+    if "RefSeq_accession" in genome_id_data:
+        best_id = genome_id_data["RefSeq_accession"]
+    elif "GenBank_accession" in genome_id_data:
+        best_id = genome_id_data["GenBank_accession"]
+    elif "JGI_Genome_ID" in genome_id_data:
+        best_id = genome_id_data["JGI_Genome_ID"]
+    else:
+        best_id = None
+
+    if best_id is None or len(best_id) == 0:
+        logger.warning(f"Failed to get valid genome ID in genome data: {genome_id_data}")
+        return None
+    return best_id
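
The branches above give RefSeq priority over GenBank, and GenBank over JGI, which a
quick sketch makes concrete (accession values are made up):

    get_best_available_genome_id(
        {"RefSeq_accession": "GCF_000514775.1", "JGI_Genome_ID": "2515154177"}
    )  # -> "GCF_000514775.1"
    get_best_available_genome_id({"JGI_Genome_ID": "2515154177"})  # -> "2515154177"
    get_best_available_genome_id({})  # -> None, with a warning logged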

podp_download_and_extract_antismash_data

    podp_download_and_extract_antismash_data(
        genome_records: Sequence[Mapping[str, Mapping[str, str]]],
        project_download_root: str | PathLike,
        project_extract_root: str | PathLike,
    )

Download and extract antiSMASH BGC archive for the given genome records.

Parameters:
    genome_records (Sequence[Mapping[str, Mapping[str, str]]]): list of dicts
        representing genome records. The dict of each genome record contains
            - key (str): "genome_ID"
            - value (dict[str, str]): a dict containing information about genome
              type, label and accession ids (RefSeq, GenBank, and/or JGI).
        [required]
    project_download_root (str | PathLike): Path to the directory to place the
        downloaded archive in. [required]
    project_extract_root (str | PathLike): Path to the directory the downloaded
        archive will be extracted to. Note that an `antismash` directory will be
        created in the specified `extract_root` if it doesn't exist. The files will
        be extracted to the `<extract_root>/antismash/<antismash_id>` directory.
        [required]

Source code in src/nplinker/genomics/antismash/podp_antismash_downloader.py
def podp_download_and_extract_antismash_data(
+    genome_records: Sequence[Mapping[str, Mapping[str, str]]],
+    project_download_root: str | PathLike,
+    project_extract_root: str | PathLike,
+):
+    """Download and extract antiSMASH BGC archive for the given genome records.
+
+    Args:
+        genome_records: list of dicts
+            representing genome records. The dict of each genome record contains
+                - key(str): "genome_ID"
+                - value(dict[str, str]): a dict containing information about genome
+                type, label and accession ids (RefSeq, GenBank, and/or JGI).
+        project_download_root: Path to the directory to place
+            downloaded archive in.
+        project_extract_root: Path to the directory downloaded archive
+            will be extracted to.
+            Note that an `antismash` directory will be created in the specified
+            `extract_root` if it doesn't exist. The files will be extracted to
+            `<extract_root>/antismash/<antismash_id>` directory.
+    """
+    if not Path(project_download_root).exists():
+        # otherwise in case of failed first download, the folder doesn't exist and
+        # genome_status_file can't be written
+        Path(project_download_root).mkdir(parents=True, exist_ok=True)
+
+    gs_file = Path(project_download_root, GENOME_STATUS_FILENAME)
+    gs_dict = GenomeStatus.read_json(gs_file)
+
+    for i, genome_record in enumerate(genome_records):
+        # get the best available ID from the dict
+        genome_id_data = genome_record["genome_ID"]
+        raw_genome_id = get_best_available_genome_id(genome_id_data)
+        if raw_genome_id is None or len(raw_genome_id) == 0:
+            logger.warning(f'Invalid input genome record "{genome_record}"')
+            continue
+
+        # check if genome ID exist in the genome status file
+        if raw_genome_id not in gs_dict:
+            gs_dict[raw_genome_id] = GenomeStatus(raw_genome_id)
+
+        gs_obj = gs_dict[raw_genome_id]
+
+        logger.info(
+            f"Checking for antismash data {i + 1}/{len(genome_records)}, "
+            f"current genome ID={raw_genome_id}"
+        )
+        # first, check if BGC data is downloaded
+        if gs_obj.bgc_path and Path(gs_obj.bgc_path).exists():
+            logger.info(f"Genome ID {raw_genome_id} already downloaded to {gs_obj.bgc_path}")
+            continue
+        # second, check if lookup attempted previously
+        if gs_obj.resolve_attempted:
+            logger.info(f"Genome ID {raw_genome_id} skipped due to previous failed attempt")
+            continue
+
+        # if not downloaded or lookup attempted, then try to resolve the ID
+        # and download
+        logger.info(f"Start lookup process for genome ID {raw_genome_id}")
+        gs_obj.resolved_refseq_id = _resolve_refseq_id(genome_id_data)
+        gs_obj.resolve_attempted = True
+
+        if gs_obj.resolved_refseq_id == "":
+            # give up on this one
+            logger.warning(f"Failed lookup for genome ID {raw_genome_id}")
+            continue
+
+        # if resolved id is valid, try to download and extract antismash data
+        try:
+            download_and_extract_antismash_data(
+                gs_obj.resolved_refseq_id, project_download_root, project_extract_root
+            )
+
+            gs_obj.bgc_path = str(
+                Path(project_download_root, gs_obj.resolved_refseq_id + ".zip").absolute()
+            )
+
+            output_path = Path(project_extract_root, "antismash", gs_obj.resolved_refseq_id)
+            if output_path.exists():
+                Path.touch(output_path / "completed", exist_ok=True)
+
+        except Exception:
+            gs_obj.bgc_path = ""
+
+    # raise and log warning for failed downloads
+    failed_ids = [gs.original_id for gs in gs_dict.values() if not gs.bgc_path]
+    if failed_ids:
+        warning_message = (
+            f"Failed to download antiSMASH data for the following genome IDs: {failed_ids}"
+        )
+        logger.warning(warning_message)
+        warnings.warn(warning_message, UserWarning)
+
+    # save updated genome status to json file
+    GenomeStatus.to_json(gs_dict, gs_file)
+
+    if len(failed_ids) == len(genome_records):
+        raise ValueError("No antiSMASH data found for any genome")
\ No newline at end of file
diff --git a/2.0.0a3/api/arranger/index.html b/2.0.0a3/api/arranger/index.html
new file mode 100644
index 00000000..8cde78a1
--- /dev/null
+++ b/2.0.0a3/api/arranger/index.html
@@ -0,0 +1,3457 @@

Dataset Arranger - NPLinker
Dataset Arranger

arranger

PODP_PROJECT_URL (module-attribute)

PODP_PROJECT_URL = "https://pairedomicsdata.bioinformatics.nl/api/projects/{}"

DatasetArranger

    DatasetArranger(config: Dynaconf)

Arrange the dataset required by NPLinker.

This class is used to arrange the datasets required by NPLinker according to the
configuration. The datasets include MIBiG, GNPS, antiSMASH, and BiG-SCAPE.

If `self.config.mode` is "local", the datasets are validated.
If `self.config.mode` is "podp", the datasets are downloaded or generated.

Attributes:
    config: A Dynaconf object that contains the configuration settings. Check the
        `nplinker.config` module for more details.
    root_dir: The root directory of the datasets.
    downloads_dir: The directory to store downloaded files.
    mibig_dir: The directory to store MIBiG metadata.
    gnps_dir: The directory to store GNPS data.
    antismash_dir: The directory to store antiSMASH data.
    bigscape_dir: The directory to store BiG-SCAPE data.
    bigscape_running_output_dir: The directory to store the running output of BiG-SCAPE.

Parameters:
    config (Dynaconf): A Dynaconf object that contains the configuration settings.
        Check the `nplinker.config` module for more details. [required]

Source code in src/nplinker/arranger.py
def __init__(self, config: Dynaconf) -> None:
+    """Initialize the DatasetArranger.
+
+    Args:
+        config: A Dynaconf object that contains the configuration settings. Check `nplinker.config`
+            module for more details.
+    """
+    self.config = config
+    self.root_dir = config.root_dir
+    self.downloads_dir = self.root_dir / defaults.DOWNLOADS_DIRNAME
+    self.downloads_dir.mkdir(exist_ok=True)
+
+    self.mibig_dir = self.root_dir / defaults.MIBIG_DIRNAME
+    self.gnps_dir = self.root_dir / defaults.GNPS_DIRNAME
+    self.antismash_dir = self.root_dir / defaults.ANTISMASH_DIRNAME
+    self.bigscape_dir = self.root_dir / defaults.BIGSCAPE_DIRNAME
+    self.bigscape_running_output_dir = (
+        self.bigscape_dir / defaults.BIGSCAPE_RUNNING_OUTPUT_DIRNAME
+    )
+
+    self.arrange_podp_project_json()
+
config (instance-attribute)

    config = config

root_dir (instance-attribute)

    root_dir = root_dir

downloads_dir (instance-attribute)

    downloads_dir = root_dir / DOWNLOADS_DIRNAME

mibig_dir (instance-attribute)

    mibig_dir = root_dir / MIBIG_DIRNAME

gnps_dir (instance-attribute)

    gnps_dir = root_dir / GNPS_DIRNAME

antismash_dir (instance-attribute)

    antismash_dir = root_dir / ANTISMASH_DIRNAME

bigscape_dir (instance-attribute)

    bigscape_dir = root_dir / BIGSCAPE_DIRNAME

bigscape_running_output_dir (instance-attribute)

    bigscape_running_output_dir = (
        bigscape_dir / BIGSCAPE_RUNNING_OUTPUT_DIRNAME
    )

arrange

    arrange() -> None

Arrange the datasets according to the configuration.

The datasets include MIBiG, GNPS, antiSMASH, and BiG-SCAPE.

Source code in src/nplinker/arranger.py
def arrange(self) -> None:
+    """Arrange the datasets according to the configuration.
+
+    The datasets include MIBiG, GNPS, antiSMASH, and BiG-SCAPE.
+    """
+    # The order of arranging the datasets matters, as some datasets depend on others
+    self.arrange_mibig()
+    self.arrange_gnps()
+    self.arrange_antismash()
+    self.arrange_bigscape()
+    self.arrange_strain_mappings()
+    self.arrange_strains_selected()
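
In practice the arrange_* methods are rarely called one by one; a typical flow builds
the arranger and calls arrange(). How the Dynaconf object is constructed depends on
the nplinker.config module, so the config line below is only a placeholder:

    from nplinker.arranger import DatasetArranger

    config = ...  # a Dynaconf settings object, see the nplinker.config module
    arranger = DatasetArranger(config)
    arranger.arrange()  # MIBiG -> GNPS -> antiSMASH -> BiG-SCAPE -> strain files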

arrange_podp_project_json

    arrange_podp_project_json() -> None

Arrange the PODP project JSON file.

If `self.config.mode` is "podp", download the PODP project JSON file if it doesn't
exist. Then validate the PODP project JSON file if it exists or is downloaded.

The validation is controlled by the json schema `schemas/podp_adapted_schema.json`.

Source code in src/nplinker/arranger.py
def arrange_podp_project_json(self) -> None:
+    """Arrange the PODP project JSON file.
+
+    If `self.config.mode` is "podp", download the PODP project JSON file if it doesn't exist. Then
+    validate the PODP project JSON file if it exists or is downloaded.
+
+    The validation is controlled by the json schema `schemas/podp_adapted_schema.json`.
+    """
+    if self.config.mode == "podp":
+        file_name = f"paired_datarecord_{self.config.podp_id}.json"
+        podp_file = self.downloads_dir / file_name
+        if not podp_file.exists():
+            download_url(
+                PODP_PROJECT_URL.format(self.config.podp_id),
+                self.downloads_dir,
+                file_name,
+            )
+
+        with open(podp_file, "r") as f:
+            json_data = json.load(f)
+        validate_podp_json(json_data)

arrange_mibig

    arrange_mibig() -> None

Arrange the MIBiG metadata.

Always download and extract the MIBiG metadata if `self.config.mibig.to_use` is True.
If the default directory already exists, it will be removed and re-downloaded to
ensure the latest version is used, so it's not allowed to manually put MIBiG metadata
in the default directory.

Source code in src/nplinker/arranger.py
def arrange_mibig(self) -> None:
+    """Arrange the MIBiG metadata.
+
+    Always download and extract the MIBiG metadata if `self.config.mibig.to_use` is True.
+    If the default directory has already existed, it will be removed and re-downloaded to ensure
+    the latest version is used. So it's not allowed to manually put MIBiG metadata in the
+    default directory.
+    """
+    if self.config.mibig.to_use:
+        if self.mibig_dir.exists():
+            # remove existing mibig data
+            shutil.rmtree(self.mibig_dir)
+        download_and_extract_mibig_metadata(
+            self.downloads_dir,
+            self.mibig_dir,
+            version=self.config.mibig.version,
+        )

arrange_gnps

    arrange_gnps() -> None

Arrange the GNPS data.

If `self.config.mode` is "local", validate the GNPS data directory.
If `self.config.mode` is "podp", download the GNPS data if it doesn't exist or remove
the existing GNPS data and re-download it if it is invalid.

The validation process includes:

- Check if the GNPS data directory exists.
- Check if the required files exist in the GNPS data directory, including:
    - file_mappings.tsv or file_mappings.csv
    - spectra.mgf
    - molecular_families.tsv
    - annotations.tsv

Source code in src/nplinker/arranger.py
def arrange_gnps(self) -> None:
+    """Arrange the GNPS data.
+
+    If `self.config.mode` is "local", validate the GNPS data directory.
+    If `self.config.mode` is "podp", download the GNPS data if it doesn't exist or remove the
+    existing GNPS data and re-download it if it is invalid.
+
+    The validation process includes:
+
+    - Check if the GNPS data directory exists.
+    - Check if the required files exist in the GNPS data directory, including:
+        - file_mappings.tsv or file_mappings.csv
+        - spectra.mgf
+        - molecular_families.tsv
+        - annotations.tsv
+    """
+    pass_validation = False
+    if self.config.mode == "podp":
+        # retry downloading at most 3 times if downloaded data has problems
+        for _ in range(3):
+            try:
+                validate_gnps(self.gnps_dir)
+                pass_validation = True
+                break
+            except (FileNotFoundError, ValueError):
+                # Don't need to remove downloaded archive, as it'll be overwritten
+                shutil.rmtree(self.gnps_dir, ignore_errors=True)
+                self._download_and_extract_gnps()
+
+    if not pass_validation:
+        validate_gnps(self.gnps_dir)
+
+    # get the path to file_mappings file (csv or tsv)
+    self.gnps_file_mappings_file = self._get_gnps_file_mappings_file()

arrange_antismash

    arrange_antismash() -> None

Arrange the antiSMASH data.

If `self.config.mode` is "local", validate the antiSMASH data directory.
If `self.config.mode` is "podp", download the antiSMASH data if it doesn't exist or
remove the existing antiSMASH data and re-download it if it is invalid.

The validation process includes:

- Check if the antiSMASH data directory exists.
- Check if the antiSMASH data directory contains at least one sub-directory, and each
  sub-directory contains at least one BGC file (with the suffix ".region???.gbk"
  where ??? is a number).

AntiSMASH BGC directory must follow the structure below:

    antismash
        ├── genome_id_1 (one AntiSMASH output, e.g. GCF_000514775.1)
        │  ├── GCF_000514775.1.gbk
        │  ├── NZ_AZWO01000004.region001.gbk
        │  └── ...
        ├── genome_id_2
        │  ├── ...
        └── ...

Source code in src/nplinker/arranger.py
def arrange_antismash(self) -> None:
+    """Arrange the antiSMASH data.
+
+    If `self.config.mode` is "local", validate the antiSMASH data directory.
+    If `self.config.mode` is "podp", download the antiSMASH data if it doesn't exist or remove the
+    existing antiSMASH data and re-download it if it is invalid.
+
+    The validation process includes:
+    - Check if the antiSMASH data directory exists.
+    - Check if the antiSMASH data directory contains at least one sub-directory, and each
+        sub-directory contains at least one BGC file (with the suffix ".region???.gbk" where ???
+        is a number).
+
+    AntiSMASH BGC directory must follow the structure below:
+    ```
+    antismash
+        ├── genome_id_1 (one AntiSMASH output, e.g. GCF_000514775.1)
+        │  ├── GCF_000514775.1.gbk
+        │  ├── NZ_AZWO01000004.region001.gbk
+        │  └── ...
+        ├── genome_id_2
+        │  ├── ...
+        └── ...
+    ```
+    """
+    pass_validation = False
+    if self.config.mode == "podp":
+        for _ in range(3):
+            try:
+                validate_antismash(self.antismash_dir)
+                pass_validation = True
+                break
+            except FileNotFoundError:
+                shutil.rmtree(self.antismash_dir, ignore_errors=True)
+                self._download_and_extract_antismash()
+
+    if not pass_validation:
+        validate_antismash(self.antismash_dir)
+

arrange_bigscape

    arrange_bigscape() -> None

Arrange the BiG-SCAPE data.

If `self.config.mode` is "local", validate the BiG-SCAPE data directory.
If `self.config.mode` is "podp", run BiG-SCAPE to generate the clustering file if it
doesn't exist or remove the existing BiG-SCAPE data and re-run BiG-SCAPE if it is
invalid. The running output of BiG-SCAPE will be saved to the directory
"bigscape_running_output" in the default BiG-SCAPE directory, and the clustering file
"mix_clustering_c{self.config.bigscape.cutoff}.tsv" will be copied to the default
BiG-SCAPE directory.

The validation process includes:

- Check if the default BiG-SCAPE data directory exists.
- Check if the clustering file "mix_clustering_c{self.config.bigscape.cutoff}.tsv"
  exists in the BiG-SCAPE data directory.
- Check if the 'data_sqlite.db' file exists in the BiG-SCAPE data directory.

Source code in src/nplinker/arranger.py
def arrange_bigscape(self) -> None:
+    """Arrange the BiG-SCAPE data.
+
+    If `self.config.mode` is "local", validate the BiG-SCAPE data directory.
+    If `self.config.mode` is "podp", run BiG-SCAPE to generate the clustering file if it doesn't
+    exist or remove the existing BiG-SCAPE data and re-run BiG-SCAPE if it is invalid.
+    The running output of BiG-SCAPE will be saved to the directory "bigscape_running_output"
+    in the default BiG-SCAPE directory, and the clustering file
+    "mix_clustering_c{self.config.bigscape.cutoff}.tsv" will be copied to the default BiG-SCAPE
+    directory.
+
+    The validation process includes:
+
+    - Check if the default BiG-SCAPE data directory exists.
+    - Check if the clustering file "mix_clustering_c{self.config.bigscape.cutoff}.tsv" exists in the
+            BiG-SCAPE data directory.
+    - Check if the 'data_sqlite.db' file exists in the BiG-SCAPE data directory.
+    """
+    pass_validation = False
+    if self.config.mode == "podp":
+        for _ in range(3):
+            try:
+                validate_bigscape(self.bigscape_dir, self.config.bigscape.cutoff)
+                pass_validation = True
+                break
+            except FileNotFoundError:
+                shutil.rmtree(self.bigscape_dir, ignore_errors=True)
+                self._run_bigscape()
+
+    if not pass_validation:
+        validate_bigscape(self.bigscape_dir, self.config.bigscape.cutoff)

arrange_strain_mappings

    arrange_strain_mappings() -> None

Arrange the strain mappings file.

If `self.config.mode` is "local", validate the strain mappings file.
If `self.config.mode` is "podp", always generate the strain mappings file and validate it.

The validation checks if the strain mappings file exists and if it is a valid JSON file
according to the schema defined in `schemas/strain_mappings_schema.json`.

Source code in src/nplinker/arranger.py
def arrange_strain_mappings(self) -> None:
+    """Arrange the strain mappings file.
+
+    If `self.config.mode` is "local", validate the strain mappings file.
+    If `self.config.mode` is "podp", always generate the strain mappings file and validate it.
+
+    The validation checks if the strain mappings file exists and if it is a valid JSON file
+    according to the schema defined in `schemas/strain_mappings_schema.json`.
+    """
+    if self.config.mode == "podp":
+        self._generate_strain_mappings()
+
+    self._validate_strain_mappings()

arrange_strains_selected

    arrange_strains_selected() -> None

Arrange the strains selected file.

Validate the strains selected file if it exists.
The validation checks if the strains selected file is a valid JSON file according to
the schema defined in `schemas/user_strains.json`.

Source code in src/nplinker/arranger.py
def arrange_strains_selected(self) -> None:
+    """Arrange the strains selected file.
+
+    Validate the strains selected file if it exists.
+    The validation checks if the strains selected file is a valid JSON file according to the
+    schema defined in `schemas/user_strains.json`.
+    """
+    strains_selected_file = self.root_dir / defaults.STRAINS_SELECTED_FILENAME
+    if strains_selected_file.exists():
+        with open(strains_selected_file, "r") as f:
+            json_data = json.load(f)
+        validate(instance=json_data, schema=USER_STRAINS_SCHEMA)

validate_gnps

    validate_gnps(gnps_dir: str | PathLike) -> None

Validate the GNPS data directory and its contents.

The GNPS data directory must contain the following files:

- file_mappings.tsv or file_mappings.csv
- spectra.mgf
- molecular_families.tsv
- annotations.tsv

Parameters:
    gnps_dir (str | PathLike): Path to the GNPS data directory. [required]

Raises:
    FileNotFoundError: If the GNPS data directory is not found or any of the required
        files is not found.
    ValueError: If both file_mappings.tsv and file_mappings.csv are found.

Source code in src/nplinker/arranger.py
def validate_gnps(gnps_dir: str | PathLike) -> None:
+    """Validate the GNPS data directory and its contents.
+
+    The GNPS data directory must contain the following files:
+
+    - file_mappings.tsv or file_mappings.csv
+    - spectra.mgf
+    - molecular_families.tsv
+    - annotations.tsv
+
+    Args:
+        gnps_dir: Path to the GNPS data directory.
+
+    Raises:
+        FileNotFoundError: If the GNPS data directory is not found or any of the required files
+            is not found.
+        ValueError: If both file_mappings.tsv and file_mappings.csv are found.
+    """
+    gnps_dir = Path(gnps_dir)
+    if not gnps_dir.exists():
+        raise FileNotFoundError(f"GNPS data directory not found at {gnps_dir}")
+
+    file_mappings_tsv = gnps_dir / defaults.GNPS_FILE_MAPPINGS_TSV
+    file_mappings_csv = gnps_dir / defaults.GNPS_FILE_MAPPINGS_CSV
+    if file_mappings_tsv.exists() and file_mappings_csv.exists():
+        raise ValueError(
+            f"Both {file_mappings_tsv.name} and {file_mappings_csv.name} found in GNPS directory "
+            f"{gnps_dir}, only one is allowed."
+        )
+    elif not file_mappings_tsv.exists() and not file_mappings_csv.exists():
+        raise FileNotFoundError(
+            f"Neither {file_mappings_tsv.name} nor {file_mappings_csv.name} found in GNPS directory"
+            f" {gnps_dir}"
+        )
+
+    required_files = [
+        gnps_dir / defaults.GNPS_SPECTRA_FILENAME,
+        gnps_dir / defaults.GNPS_MOLECULAR_FAMILY_FILENAME,
+        gnps_dir / defaults.GNPS_ANNOTATIONS_FILENAME,
+    ]
+    list_not_found = [f.name for f in required_files if not f.exists()]
+    if list_not_found:
+        raise FileNotFoundError(
+            f"Files not found in GNPS directory {gnps_dir}: ', '.join({list_not_found})"
+        )

validate_antismash

    validate_antismash(antismash_dir: str | PathLike) -> None

Validate the antiSMASH data directory and its contents.

The validation only checks the structure of the antiSMASH data directory and file
names. It does not check

- the content of the BGC files
- the consistency between the antiSMASH data and the PODP project JSON file for the
  PODP mode

The antiSMASH data directory must exist and contain at least one sub-directory. The
name of the sub-directories must not contain any space. Each sub-directory must contain
at least one BGC file (with the suffix ".region???.gbk" where ??? is the region number).

Parameters:
    antismash_dir (str | PathLike): Path to the antiSMASH data directory. [required]

Raises:
    FileNotFoundError: If the antiSMASH data directory is not found, or no
        sub-directories are found in the antiSMASH data directory, or no BGC files
        are found in any sub-directory.
    ValueError: If any sub-directory name contains a space.

Source code in src/nplinker/arranger.py
def validate_antismash(antismash_dir: str | PathLike) -> None:
+    """Validate the antiSMASH data directory and its contents.
+
+    The validation only checks the structure of the antiSMASH data directory and file names.
+    It does not check
+
+    - the content of the BGC files
+    - the consistency between the antiSMASH data and the PODP project JSON file for the PODP
+        mode
+
+    The antiSMASH data directory must exist and contain at least one sub-directory. The name of the
+    sub-directories must not contain any space. Each sub-directory must contain at least one BGC
+    file (with the suffix ".region???.gbk" where ??? is the region number).
+
+    Args:
+        antismash_dir: Path to the antiSMASH data directory.
+
+    Raises:
+        FileNotFoundError: If the antiSMASH data directory is not found, or no sub-directories
+            are found in the antiSMASH data directory, or no BGC files are found in any
+            sub-directory.
+        ValueError: If any sub-directory name contains a space.
+    """
+    antismash_dir = Path(antismash_dir)
+    if not antismash_dir.exists():
+        raise FileNotFoundError(f"antiSMASH data directory not found at {antismash_dir}")
+
+    sub_dirs = list_dirs(antismash_dir)
+    if not sub_dirs:
+        raise FileNotFoundError(
+            "No BGC directories found in antiSMASH data directory {antismash_dir}"
+        )
+
+    for sub_dir in sub_dirs:
+        dir_name = Path(sub_dir).name
+        if " " in dir_name:
+            raise ValueError(
+                f"antiSMASH sub-directory name {dir_name} contains space, which is not allowed"
+            )
+
+        gbk_files = list_files(sub_dir, suffix=".gbk", keep_parent=False)
+        bgc_files = fnmatch.filter(gbk_files, "*.region???.gbk")
+        if not bgc_files:
+            raise FileNotFoundError(f"No BGC files found in antiSMASH sub-directory {sub_dir}")

validate_bigscape

    validate_bigscape(bigscape_dir: str | PathLike, cutoff: str) -> None

Validate the BiG-SCAPE data directory and its contents.

The BiG-SCAPE data directory must exist and contain the clustering file
"mix_clustering_c{self.config.bigscape.cutoff}.tsv" where {self.config.bigscape.cutoff}
is the bigscape cutoff value set in the config file.

Alternatively, the directory can contain the BiG-SCAPE database file generated by
BiG-SCAPE v2. At the moment, all the family assignments in the database will be used,
so this database should contain results from a single run with the desired cutoff.

Parameters:
    bigscape_dir (str | PathLike): Path to the BiG-SCAPE data directory. [required]
    cutoff (str): The BiG-SCAPE cutoff value. [required]

Raises:
    FileNotFoundError: If the BiG-SCAPE data directory or the clustering file is not found.

Source code in src/nplinker/arranger.py
def validate_bigscape(bigscape_dir: str | PathLike, cutoff: str) -> None:
+    """Validate the BiG-SCAPE data directory and its contents.
+
+    The BiG-SCAPE data directory must exist and contain the clustering file
+    "mix_clustering_c{self.config.bigscape.cutoff}.tsv" where {self.config.bigscape.cutoff} is the
+    bigscape cutoff value set in the config file.
+
+    Alternatively, the directory can contain the BiG-SCAPE database file generated by BiG-SCAPE v2.
+    At the moment, all the family assignments in the database will be used, so this database should
+    contain results from a single run with the desired cutoff.
+
+    Args:
+        bigscape_dir: Path to the BiG-SCAPE data directory.
+        cutoff: The BiG-SCAPE cutoff value.
+
+    Raises:
+        FileNotFoundError: If the BiG-SCAPE data directory or the clustering file is not found.
+    """
+    bigscape_dir = Path(bigscape_dir)
+    if not bigscape_dir.exists():
+        raise FileNotFoundError(f"BiG-SCAPE data directory not found at {bigscape_dir}")
+
+    clustering_file = bigscape_dir / f"mix_clustering_c{cutoff}.tsv"
+    database_file = bigscape_dir / "data_sqlite.db"
+    if not clustering_file.exists() and not database_file.exists():
+        raise FileNotFoundError(f"BiG-SCAPE data not found in {clustering_file} or {database_file}")
+
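A minimal sketch of calling this check before loading BiG-SCAPE results (path and cutoff are illustrative; the cutoff must match the value used in the BiG-SCAPE run, so "0.30" corresponds to "mix_clustering_c0.30.tsv"):

from nplinker.arranger import validate_bigscape  # module per the "Source code" note above

try:
    validate_bigscape("output/bigscape", cutoff="0.30")
except FileNotFoundError as e:
    print(e)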
diff --git a/2.0.0a3/api/bigscape/index.html b/2.0.0a3/api/bigscape/index.html
new file mode 100644
index 00000000..bf6bddd4
--- /dev/null
+++ b/2.0.0a3/api/bigscape/index.html
@@ -0,0 +1,2440 @@

BigScape - NPLinker
BigScape

bigscape

BigscapeGCFLoader
BigscapeGCFLoader(cluster_file: str | PathLike)
Bases: GCFLoaderBase

Build a loader for BiG-SCAPE GCF cluster file.

Attributes:
    cluster_file (str): Path to the BiG-SCAPE cluster file.

Parameters:
    cluster_file (str | PathLike): Path to the BiG-SCAPE cluster file;
        the filename has a pattern of "<class>_clustering_c0.xx.tsv". [required]

Source code in src/nplinker/genomics/bigscape/bigscape_loader.py
def __init__(self, cluster_file: str | PathLike, /) -> None:
+    """Initialize the BiG-SCAPE GCF loader.
+
+    Args:
+        cluster_file: Path to the BiG-SCAPE cluster file,
+            the filename has a pattern of "<class>_clustering_c0.xx.tsv".
+    """
+    self.cluster_file: str = str(cluster_file)
+    self._gcf_list = self._parse_gcf(self.cluster_file)
cluster_file (instance attribute):

cluster_file: str = str(cluster_file)

get_gcfs
get_gcfs(
+    keep_mibig_only: bool = False,
+    keep_singleton: bool = False,
+) -> list[GCF]
Get all GCF objects.

Parameters:
    keep_mibig_only (bool): True to keep GCFs that contain only MIBiG BGCs. [default: False]
    keep_singleton (bool): True to keep singleton GCFs. A singleton GCF is a GCF
        that contains only one BGC. [default: False]

Returns:
    list[GCF]: A list of GCF objects.

Source code in src/nplinker/genomics/bigscape/bigscape_loader.py
def get_gcfs(self, keep_mibig_only: bool = False, keep_singleton: bool = False) -> list[GCF]:
+    """Get all GCF objects.
+
+    Args:
+        keep_mibig_only: True to keep GCFs that contain only MIBiG
+            BGCs.
+        keep_singleton: True to keep singleton GCFs. A singleton GCF
+            is a GCF that contains only one BGC.
+
+    Returns:
+        A list of GCF objects.
+    """
+    gcf_list = self._gcf_list
+    if not keep_mibig_only:
+        gcf_list = [gcf for gcf in gcf_list if not gcf.has_mibig_only()]
+    if not keep_singleton:
+        gcf_list = [gcf for gcf in gcf_list if not gcf.is_singleton()]
+    return gcf_list
+
+
+
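A minimal usage sketch (the file path is hypothetical; the filename must follow the "<class>_clustering_c0.xx.tsv" pattern):

loader = BigscapeGCFLoader("output/bigscape/mix_clustering_c0.30.tsv")
gcfs = loader.get_gcfs(keep_mibig_only=False, keep_singleton=False)
print(f"{len(gcfs)} GCFs kept after filtering")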
BigscapeV2GCFLoader
BigscapeV2GCFLoader(db_file: str | PathLike)
Bases: GCFLoaderBase

Build a loader for BiG-SCAPE v2 database file.

Attributes:
    db_file: Path to the BiG-SCAPE database file.

Parameters:
    db_file (str | PathLike): Path to the BiG-SCAPE v2 database file. [required]

Source code in src/nplinker/genomics/bigscape/bigscape_loader.py
def __init__(self, db_file: str | PathLike, /) -> None:
+    """Initialize the BiG-SCAPE v2 GCF loader.
+
+    Args:
+        db_file: Path to the BiG-SCAPE v2 database file
+    """
+    self.db_file = str(db_file)
+    self._gcf_list = self._parse_gcf(self.db_file)
+
+
db_file (instance attribute):

db_file = str(db_file)

get_gcfs
get_gcfs(
+    keep_mibig_only: bool = False,
+    keep_singleton: bool = False,
+) -> list[GCF]
Get all GCF objects.

Parameters:
    keep_mibig_only (bool): True to keep GCFs that contain only MIBiG BGCs. [default: False]
    keep_singleton (bool): True to keep singleton GCFs. A singleton GCF is a GCF
        that contains only one BGC. [default: False]

Returns:
    list[GCF]: A list of GCF objects.

Source code in src/nplinker/genomics/bigscape/bigscape_loader.py
def get_gcfs(self, keep_mibig_only: bool = False, keep_singleton: bool = False) -> list[GCF]:
+    """Get all GCF objects.
+
+    Args:
+        keep_mibig_only: True to keep GCFs that contain only MIBiG
+            BGCs.
+        keep_singleton: True to keep singleton GCFs. A singleton GCF
+            is a GCF that contains only one BGC.
+
+    Returns:
+        A list of GCF objects.
+    """
+    gcf_list = self._gcf_list
+    if not keep_mibig_only:
+        gcf_list = [gcf for gcf in gcf_list if not gcf.has_mibig_only()]
+    if not keep_singleton:
+        gcf_list = [gcf for gcf in gcf_list if not gcf.is_singleton()]
+    return gcf_list
+
run_bigscape
run_bigscape(
+    antismash_path: str | PathLike,
+    output_path: str | PathLike,
+    extra_params: str,
+)
Source code in src/nplinker/genomics/bigscape/runbigscape.py
def run_bigscape(
+    antismash_path: str | PathLike,
+    output_path: str | PathLike,
+    extra_params: str,
+):
+    bigscape_py_path = "bigscape.py"
+    logger.info(
+        f'run_bigscape: input="{antismash_path}", output="{output_path}", extra_params="{extra_params}"'
+    )
+
+    try:
+        subprocess.run([bigscape_py_path, "-h"], capture_output=True, check=True)
+    except Exception as e:
+        raise Exception(f"Failed to find/run bigscape.py (path={bigscape_py_path}, err={e})") from e
+
+    if not os.path.exists(antismash_path):
+        raise Exception(f'antismash_path "{antismash_path}" does not exist!')
+
+    # configure the IO-related parameters, including pfam_dir
+    args = [bigscape_py_path, "-i", antismash_path, "-o", output_path, "--pfam_dir", PFAM_PATH]
+
+    # append the user supplied params, if any
+    if len(extra_params) > 0:
+        args.extend(extra_params.split(" "))
+
+    logger.info(f"BiG-SCAPE command: {args}")
+    result = subprocess.run(args, stdout=sys.stdout, stderr=sys.stderr, check=True)
+    logger.info(f"BiG-SCAPE completed with return code {result.returncode}")
+    # use subprocess.CompletedProcess.check_returncode() to test if the BiG-SCAPE
+    # process exited successfully. This throws an exception for non-zero returncodes
+    # which will indicate to the PODPDownloader module that something went wrong.
+    result.check_returncode()
+
+    return True
+
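A hedged invocation sketch: run_bigscape shells out to bigscape.py, so BiG-SCAPE must be installed and on PATH, and PFAM_PATH must point to a prepared Pfam directory. The paths and the extra flag below are illustrative:

run_bigscape(
    antismash_path="output/antismash",
    output_path="output/bigscape",
    extra_params="--mix",  # extra CLI flags as one space-separated string
)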
diff --git a/2.0.0a3/api/genomics/index.html b/2.0.0a3/api/genomics/index.html
new file mode 100644
index 00000000..c03f6588
--- /dev/null
+++ b/2.0.0a3/api/genomics/index.html
@@ -0,0 +1,3357 @@

Data Models - NPLinker
Data Models

genomics

BGC
BGC(id: str, /, *product_prediction: str)

Class to model BGC (biosynthetic gene cluster) data.

BGC data include both annotations and sequence data. This class is
mainly designed to model the annotations or metadata.

The raw BGC data is stored in GenBank format (.gbk). Additional
GenBank features could be added to the GenBank file to annotate
BGCs; e.g., antiSMASH has some self-defined features (like "region") in
its output GenBank files.

The annotations of a BGC can be stored in JSON format, which is defined
and used by MIBiG.

Attributes:
    id: BGC identifier, e.g. MIBiG accession, GenBank accession.
    product_prediction: A tuple of (predicted) natural products or product classes of the BGC.
        For antiSMASH GenBank data, the feature "region /product" gives product information.
        For MIBiG metadata, its biosynthetic class provides such info.
    mibig_bgc_class (tuple[str] | None): A tuple of MIBiG biosynthetic classes to which the
        BGC belongs. Defaults to None. MIBiG defines 6 major biosynthetic classes for natural
        products, including "NRP", "Polyketide", "RiPP", "Terpene", "Saccharide" and "Alkaloid".
        Note that natural products created by all other biosynthetic mechanisms fall under the
        category "Other". For more details see the publication: https://doi.org/10.1186/s40793-018-0318-y.
    description (str | None): Brief description of the BGC. Defaults to None.
    smiles (tuple[str] | None): A tuple of SMILES formulas of the BGC's products. Defaults to None.
    antismash_file (str | None): The path to the antiSMASH GenBank file. Defaults to None.
    antismash_id (str | None): Identifier of the antiSMASH BGC, referring to the feature VERSION
        of the GenBank file. Defaults to None.
    antismash_region (int | None): The antiSMASH BGC region number, referring to the feature
        "region" of the GenBank file. Defaults to None.
    parents (set[GCF]): The set of GCFs that contain the BGC.
    strain (Strain | None): The strain of the BGC.

Parameters:
    id (str): BGC identifier, e.g. MIBiG accession, GenBank accession. [required]
    product_prediction (str): BGC's (predicted) natural products or product classes. [default: ()]

Source code in src/nplinker/genomics/bgc.py
def __init__(self, id: str, /, *product_prediction: str):
+    """Initialize the BGC object.
+
+    Args:
+        id: BGC identifier, e.g. MIBiG accession, GenBank accession.
+        product_prediction: BGC's (predicted) natural products or product classes.
+    """
+    # BGC metadata
+    self.id = id
+    self.product_prediction = product_prediction
+
+    self.mibig_bgc_class: tuple[str] | None = None
+    self.description: str | None = None
+    self.smiles: tuple[str] | None = None
+
+    # antismash related attributes
+    self.antismash_file: str | None = None
+    self.antismash_id: str | None = None  # version in .gbk, id in SeqRecord
+    self.antismash_region: int | None = None  # antismash region number
+
+    # other attributes
+    self.parents: set[GCF] = set()
+    self._strain: Strain | None = None
+
id (instance attribute):
    id = id

product_prediction (instance attribute):
    product_prediction = product_prediction

mibig_bgc_class (instance attribute):
    mibig_bgc_class: tuple[str] | None = None

description (instance attribute):
    description: str | None = None

smiles (instance attribute):
    smiles: tuple[str] | None = None

antismash_file (instance attribute):
    antismash_file: str | None = None

antismash_id (instance attribute):
    antismash_id: str | None = None

antismash_region (instance attribute):
    antismash_region: int | None = None

parents (instance attribute):
    parents: set[GCF] = set()

strain (property, writable):
    strain: Strain | None
    Get the strain of the BGC.

bigscape_classes (property):
    bigscape_classes: set[str | None]
    Get BiG-SCAPE's BGC classes. These are similar to those defined in MIBiG but
    have more categories (7 classes). For more details see:
    https://doi.org/10.1038%2Fs41589-019-0400-9.

aa_predictions (property):
    aa_predictions: list
    Amino acids as predicted monomers of the product.
    Returns a list of dicts with amino acid as key and prediction probability as value.

add_parent

add_parent(gcf: GCF) -> None

Add a parent GCF to the BGC.

Parameters:
    gcf (GCF): gene cluster family. [required]

Source code in src/nplinker/genomics/bgc.py
def add_parent(self, gcf: GCF) -> None:
+    """Add a parent GCF to the BGC.
+
+    Args:
+        gcf: gene cluster family
+    """
+    gcf.add_bgc(self)
+
+
+
detach_parent

detach_parent(gcf: GCF) -> None

Remove a parent GCF.

Source code in src/nplinker/genomics/bgc.py
def detach_parent(self, gcf: GCF) -> None:
+    """Remove a parent GCF."""
+    gcf.detach_bgc(self)
+
+
+
is_mibig

is_mibig() -> bool

Check if the BGC is a MIBiG reference BGC or not.

Note:
    This method evaluates MIBiG BGCs based on the pattern that MIBiG
    BGC names start with "BGC". It might give a false positive result.

Returns:
    bool: True if it's a MIBiG reference BGC.

Source code in src/nplinker/genomics/bgc.py
def is_mibig(self) -> bool:
+    """Check if the BGC is MIBiG reference BGC or not.
+
+    Note:
+        This method evaluates MIBiG BGC based on the pattern that MIBiG
+        BGC names start with "BGC". It might give false positive result.
+
+    Returns:
+        True if it's MIBiG reference BGC
+    """
+    return self.id.startswith("BGC")
+
+
+
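A minimal sketch constructing a BGC object and attaching metadata (the id and product class are illustrative):

bgc = BGC("BGC0000001", "Polyketide")
bgc.description = "example MIBiG reference BGC"
print(bgc.is_mibig())  # True, because the id starts with "BGC"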
GCF

GCF(id: str)

Class to model gene cluster family (GCF).

A GCF is a group of similar BGCs, generated by clustering BGCs with
tools such as BiG-SCAPE and BiG-SLICE.

Attributes:
    id: id of the GCF object.
    bgc_ids (set[str]): a set of BGC ids that belong to the GCF.
    bigscape_class (str | None): BiG-SCAPE's BGC class. BiG-SCAPE's BGC classes are similar
        to those defined in MIBiG but have more categories (7 classes). For more details see:
        https://doi.org/10.1038%2Fs41589-019-0400-9.

Parameters:
    id (str): id of the GCF object. [required]

Source code in src/nplinker/genomics/gcf.py
def __init__(self, id: str, /) -> None:
+    """Initialize the GCF object.
+
+    Args:
+        id: id of the GCF object.
+    """
+    self.id = id
+    self.bgc_ids: set[str] = set()
+    self.bigscape_class: str | None = None
+    self._bgcs: set[BGC] = set()
+    self._strains: StrainCollection = StrainCollection()
+
+
+ + + +
id (instance attribute):
    id = id

bgc_ids (instance attribute):
    bgc_ids: set[str] = set()

bigscape_class (instance attribute):
    bigscape_class: str | None = None

bgcs (property):
    bgcs: set[BGC]
    Get the BGC objects.

strains (property):
    strains: StrainCollection
    Get the strains in the GCF.

add_bgc

+
add_bgc(bgc: BGC) -> None
Add a BGC object to the GCF.

Source code in src/nplinker/genomics/gcf.py
def add_bgc(self, bgc: BGC) -> None:
+    """Add a BGC object to the GCF."""
+    bgc.parents.add(self)
+    self._bgcs.add(bgc)
+    self.bgc_ids.add(bgc.id)
+    if bgc.strain is not None:
+        self._strains.add(bgc.strain)
+    else:
+        logger.warning("No strain specified for the BGC %s", bgc.id)
+
+
+
+ +
detach_bgc

detach_bgc(bgc: BGC) -> None

Remove a child BGC object.

Source code in src/nplinker/genomics/gcf.py
def detach_bgc(self, bgc: BGC) -> None:
+    """Remove a child BGC object."""
+    bgc.parents.remove(self)
+    self._bgcs.remove(bgc)
+    self.bgc_ids.remove(bgc.id)
+    if bgc.strain is not None:
+        for other_bgc in self._bgcs:
+            if other_bgc.strain == bgc.strain:
+                return
+        self._strains.remove(bgc.strain)
+
+
+
+ +
has_strain

has_strain(strain: Strain) -> bool

Check if the given strain exists.

Parameters:
    strain (Strain): Strain object. [required]

Returns:
    bool: True when the given strain exists.

Source code in src/nplinker/genomics/gcf.py
def has_strain(self, strain: Strain) -> bool:
+    """Check if the given strain exists.
+
+    Args:
+        strain: `Strain` object.
+
+    Returns:
+        True when the given strain exists.
+    """
+    return strain in self._strains
+
+
+
has_mibig_only

has_mibig_only() -> bool

Check if the GCF's children are only MIBiG BGCs.

Returns:
    bool: True if GCF.bgc_ids are only MIBiG BGC ids.

Source code in src/nplinker/genomics/gcf.py
def has_mibig_only(self) -> bool:
+    """Check if the GCF's children are only MIBiG BGCs.
+
+    Returns:
+        True if `GCF.bgc_ids` are only MIBiG BGC ids.
+    """
+    return all(map(lambda id: id.startswith("BGC"), self.bgc_ids))
+
+
+
is_singleton

is_singleton() -> bool

Check if the GCF contains only one BGC.

Returns:
    bool: True if GCF.bgc_ids contains only one BGC id.

Source code in src/nplinker/genomics/gcf.py
def is_singleton(self) -> bool:
+    """Check if the GCF contains only one BGC.
+
+    Returns:
+        True if `GCF.bgc_ids` contains only one BGC id.
+    """
+    return len(self.bgc_ids) == 1
+
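A minimal sketch tying BGC and GCF together (ids are illustrative; add_bgc logs a warning here because no strain is set on the BGC):

gcf = GCF("1")
gcf.add_bgc(bgc)  # bgc from the BGC sketch above
print(gcf.has_mibig_only(), gcf.is_singleton())  # True True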
diff --git a/2.0.0a3/api/genomics_abc/index.html b/2.0.0a3/api/genomics_abc/index.html
new file mode 100644
index 00000000..46c82bed
--- /dev/null
+++ b/2.0.0a3/api/genomics_abc/index.html
@@ -0,0 +1,2088 @@

Abstract Base Classes - NPLinker

Abstract Base Classes

abc

BGCLoaderBase
BGCLoaderBase(data_dir: str | PathLike)
Bases: ABC

Abstract base class for BGC loader.

Parameters:
    data_dir (str | PathLike): Path to directory that contains BGC metadata files
        (.json) or full data GenBank files (.gbk). [required]

Source code in src/nplinker/genomics/abc.py
def __init__(self, data_dir: str | PathLike) -> None:
+    """Initialize the BGC loader.
+
+    Args:
+        data_dir: Path to directory that contains BGC metadata files
+            (.json) or full data genbank files (.gbk).
+    """
+    self.data_dir = str(data_dir)
+
data_dir (instance attribute):
    data_dir = str(data_dir)

get_files (abstractmethod)

get_files() -> dict[str, str]

Get paths to BGC files.

Returns:
    dict[str, str]: The key is the BGC name and the value is the path to the BGC file.

Source code in src/nplinker/genomics/abc.py
@abstractmethod
+def get_files(self) -> dict[str, str]:
+    """Get path to BGC files.
+
+    Returns:
+        The key is BGC name and value is path to BGC file
+    """
+
get_bgcs (abstractmethod)

get_bgcs() -> list[BGC]

Get BGC objects.

Returns:
    list[BGC]: A list of BGC objects.

Source code in src/nplinker/genomics/abc.py
@abstractmethod
+def get_bgcs(self) -> list[BGC]:
+    """Get BGC objects.
+
+    Returns:
+        A list of BGC objects
+    """
+
GCFLoaderBase

Bases: ABC

Abstract base class for GCF loader.

get_gcfs (abstractmethod)
get_gcfs(
+    keep_mibig_only: bool, keep_singleton: bool
+) -> list[GCF]
+
Get GCF objects.

Parameters:
    keep_mibig_only (bool): True to keep GCFs that contain only MIBiG BGCs. [required]
    keep_singleton (bool): True to keep singleton GCFs. A singleton GCF is a GCF
        that contains only one BGC. [required]

Returns:
    list[GCF]: A list of GCF objects.

Source code in src/nplinker/genomics/abc.py
@abstractmethod
+def get_gcfs(self, keep_mibig_only: bool, keep_singleton: bool) -> list[GCF]:
+    """Get GCF objects.
+
+    Args:
+        keep_mibig_only: True to keep GCFs that contain only MIBiG
+            BGCs.
+        keep_singleton: True to keep singleton GCFs. A singleton GCF
+            is a GCF that contains only one BGC.
+
+    Returns:
+        A list of GCF objects
+    """
+
+
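Because these are abstract base classes, concrete loaders must implement the abstract methods. A minimal illustrative subclass (not part of NPLinker; the import path follows the "Source code" note above):

from nplinker.genomics.abc import GCFLoaderBase


class InMemoryGCFLoader(GCFLoaderBase):
    """Hypothetical loader that wraps an existing list of GCF objects."""

    def __init__(self, gcfs):
        self._gcfs = gcfs

    def get_gcfs(self, keep_mibig_only: bool, keep_singleton: bool) -> list:
        gcfs = self._gcfs
        if not keep_mibig_only:
            gcfs = [g for g in gcfs if not g.has_mibig_only()]
        if not keep_singleton:
            gcfs = [g for g in gcfs if not g.is_singleton()]
        return gcfs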
diff --git a/2.0.0a3/api/genomics_utils/index.html b/2.0.0a3/api/genomics_utils/index.html
new file mode 100644
index 00000000..99c61c38
--- /dev/null
+++ b/2.0.0a3/api/genomics_utils/index.html
@@ -0,0 +1,3056 @@

Utilities - NPLinker

Utilities

utils

logger (module attribute):
logger = getLogger(__name__)
generate_mappings_genome_id_bgc_id
generate_mappings_genome_id_bgc_id(
+    bgc_dir: str | PathLike,
+    output_file: str | PathLike | None = None,
+) -> None

Generate a file that maps genome id to BGC id.

+

Note that the output_file will be overwritten if it already exists.

Parameters:
    bgc_dir (str | PathLike): The directory with one layer of subfolders, where each
        subfolder contains BGC files in .gbk format. It assumes that
        - the subfolder name is the genome id (e.g. RefSeq accession),
        - the BGC file name is the BGC id.
        [required]
    output_file (str | PathLike | None): The path to the output file. Note that the file
        will be overwritten if it already exists. Defaults to None, in which case the output
        file will be placed in the directory bgc_dir with a file name defined in the global
        variable GENOME_BGC_MAPPINGS_FILENAME. [default: None]

Source code in src/nplinker/genomics/utils.py
def generate_mappings_genome_id_bgc_id(
+    bgc_dir: str | PathLike, output_file: str | PathLike | None = None
+) -> None:
+    """Generate a file that maps genome id to BGC id.
+
+    Note that the `output_file` will be overwritten if it already exists.
+
+    Args:
+        bgc_dir: The directory has one-layer of subfolders and
+            each subfolder contains BGC files in `.gbk` format.
+            It assumes that
+            - the subfolder name is the genome id (e.g. refseq),
+            - the BGC file name is the BGC id.
+        output_file: The path to the output file. Note
+            that the file will be overwritten if it already exists.
+            Defaults to None, in which case the output file will be placed in
+            the directory `bgc_dir` with a file name defined in global variable
+            `GENOME_BGC_MAPPINGS_FILENAME`.
+    """
+    bgc_dir = Path(bgc_dir)
+    genome_bgc_mappings = {}
+
+    for subdir in list_dirs(bgc_dir):
+        genome_id = Path(subdir).name
+        bgc_files = list_files(subdir, suffix=(".gbk"), keep_parent=False)
+        bgc_ids = [bgc_id for f in bgc_files if (bgc_id := Path(f).stem) != genome_id]
+        if bgc_ids:
+            genome_bgc_mappings[genome_id] = bgc_ids
+        else:
+            logger.warning("No BGC files found in %s", subdir)
+
+    # sort mappings by genome_id and construct json data
+    genome_bgc_mappings = dict(sorted(genome_bgc_mappings.items()))
+    json_data_mappings = [{"genome_ID": k, "BGC_ID": v} for k, v in genome_bgc_mappings.items()]
+    json_data = {"mappings": json_data_mappings, "version": "1.0"}
+
+    # validate json data
+    validate(instance=json_data, schema=GENOME_BGC_MAPPINGS_SCHEMA)
+
+    if output_file is None:
+        output_file = bgc_dir / GENOME_BGC_MAPPINGS_FILENAME
+    with open(output_file, "w") as f:
+        json.dump(json_data, f)
+    logger.info("Generated genome-BGC mappings file: %s", output_file)
+
+
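A minimal usage sketch (the directory is illustrative; the default output file name comes from GENOME_BGC_MAPPINGS_FILENAME, so the exact name below is an assumption):

generate_mappings_genome_id_bgc_id("output/antismash")
# e.g. writes output/antismash/genome_bgc_mappings.json, mapping each
# genome sub-directory name to the BGC (.gbk) file stems inside it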
+
add_strain_to_bgc
add_strain_to_bgc(
+    strains: StrainCollection, bgcs: Sequence[BGC]
+) -> tuple[list[BGC], list[BGC]]
Assign a Strain object to BGC.strain for input BGCs.

The BGC id is used to find the corresponding Strain object. It's possible that
no Strain object is found for a BGC id.

Note that the input list bgcs will be changed in place.

Parameters:
    strains (StrainCollection): A collection of all strain objects. [required]
    bgcs (Sequence[BGC]): A list of BGC objects. [required]

Returns:
    tuple[list[BGC], list[BGC]]: A tuple of two lists of BGC objects,
        - the first list contains BGC objects that are updated with a Strain object;
        - the second list contains BGC objects that are not updated with a Strain object
          because no Strain object is found.

Raises:
    ValueError: Multiple strain objects found for a BGC id.

Source code in src/nplinker/genomics/utils.py
def add_strain_to_bgc(
+    strains: StrainCollection, bgcs: Sequence[BGC]
+) -> tuple[list[BGC], list[BGC]]:
+    """Assign a Strain object to `BGC.strain` for input BGCs.
+
+    BGC id is used to find the corresponding Strain object. It's possible that
+    no Strain object is found for a BGC id.
+
+    Note that the input list `bgcs` will be changed in place.
+
+    Args:
+        strains: A collection of all strain objects.
+        bgcs: A list of BGC objects.
+
+    Returns:
+        A tuple of two lists of BGC objects,
+
+            - the first list contains BGC objects that are updated with Strain object;
+            - the second list contains BGC objects that are not updated with
+                Strain object because no Strain object is found.
+
+    Raises:
+        ValueError: Multiple strain objects found for a BGC id.
+    """
+    bgc_with_strain = []
+    bgc_without_strain = []
+    for bgc in bgcs:
+        try:
+            strain_list = strains.lookup(bgc.id)
+        except ValueError:
+            bgc_without_strain.append(bgc)
+            continue
+        if len(strain_list) > 1:
+            raise ValueError(
+                f"Multiple strain objects found for BGC id '{bgc.id}'."
+                f"BGC object accept only one strain."
+            )
+        bgc.strain = strain_list[0]
+        bgc_with_strain.append(bgc)
+
+    logger.info(
+        f"{len(bgc_with_strain)} BGC objects updated with Strain object.\n"
+        f"{len(bgc_without_strain)} BGC objects not updated with Strain object."
+    )
+    return bgc_with_strain, bgc_without_strain
+
+
+
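A hedged sketch (strains and bgcs would come from earlier loading steps):

# strains: StrainCollection with all known strains; bgcs: list[BGC]
with_strain, without_strain = add_strain_to_bgc(strains, bgcs)
print(f"{len(without_strain)} BGCs had no matching strain")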
add_bgc_to_gcf
add_bgc_to_gcf(
+    bgcs: Sequence[BGC], gcfs: Sequence[GCF]
+) -> tuple[list[GCF], list[GCF], dict[GCF, set[str]]]
Add BGC objects to GCF objects based on the GCFs' BGC ids.

The attribute GCF.bgc_ids contains the ids of BGC objects. These ids
are used to find BGC objects in the input bgcs list. The found BGC
objects are added to the bgcs attribute of the GCF object. It is possible that
some BGC ids are not found in the input bgcs list, in which case their BGC
objects are missing from the GCF object.

This method changes the lists bgcs and gcfs in place.

Parameters:
    bgcs (Sequence[BGC]): A list of BGC objects. [required]
    gcfs (Sequence[GCF]): A list of GCF objects. [required]

Returns:
    tuple[list[GCF], list[GCF], dict[GCF, set[str]]]: A tuple of two lists and a dictionary,
        - the first list contains GCF objects that are updated with BGC objects;
        - the second list contains GCF objects that are not updated with BGC objects
          because no BGC objects are found;
        - the dictionary contains GCF objects as keys and sets of ids of missing
          BGC objects as values.

Source code in src/nplinker/genomics/utils.py
def add_bgc_to_gcf(
+    bgcs: Sequence[BGC], gcfs: Sequence[GCF]
+) -> tuple[list[GCF], list[GCF], dict[GCF, set[str]]]:
+    """Add BGC objects to GCF object based on GCF's BGC ids.
+
+    The attribute of `GCF.bgc_ids` contains the ids of BGC objects. These ids
+    are used to find BGC objects from the input `bgcs` list. The found BGC
+    objects are added to the `bgcs` attribute of GCF object. It is possible that
+    some BGC ids are not found in the input `bgcs` list, and so their BGC
+    objects are missing in the GCF object.
+
+    This method changes the lists `bgcs` and `gcfs` in place.
+
+    Args:
+        bgcs: A list of BGC objects.
+        gcfs: A list of GCF objects.
+
+    Returns:
+        A tuple of two lists and a dictionary,
+
+            - The first list contains GCF objects that are updated with BGC objects;
+            - The second list contains GCF objects that are not updated with BGC objects
+                because no BGC objects are found;
+            - The dictionary contains GCF objects as keys and a set of ids of missing
+                BGC objects as values.
+    """
+    bgc_dict = {bgc.id: bgc for bgc in bgcs}
+    gcf_with_bgc = []
+    gcf_without_bgc = []
+    gcf_missing_bgc: dict[GCF, set[str]] = {}
+    for gcf in gcfs:
+        for bgc_id in gcf.bgc_ids:
+            try:
+                bgc = bgc_dict[bgc_id]
+            except KeyError:
+                if gcf not in gcf_missing_bgc:
+                    gcf_missing_bgc[gcf] = {bgc_id}
+                else:
+                    gcf_missing_bgc[gcf].add(bgc_id)
+                continue
+            gcf.add_bgc(bgc)
+
+        if gcf.bgcs:
+            gcf_with_bgc.append(gcf)
+        else:
+            gcf_without_bgc.append(gcf)
+
+    logger.info(
+        f"{len(gcf_with_bgc)} GCF objects updated with BGC objects.\n"
+        f"{len(gcf_without_bgc)} GCF objects not updated with BGC objects.\n"
+        f"{len(gcf_missing_bgc)} GCF objects have missing BGC objects."
+    )
+    return gcf_with_bgc, gcf_without_bgc, gcf_missing_bgc
+
+
+
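A hedged sketch continuing from the previous step:

# with_strain from the previous sketch; gcfs from a GCF loader (see the BigScape page)
gcf_with_bgc, gcf_without_bgc, gcf_missing_bgc = add_bgc_to_gcf(with_strain, gcfs)
for gcf, missing_ids in gcf_missing_bgc.items():
    print(f"GCF {gcf.id} is missing {len(missing_ids)} BGCs")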
get_mibig_from_gcf
get_mibig_from_gcf(
+    gcfs: Sequence[GCF],
+) -> tuple[list[BGC], StrainCollection]

Get MIBiG BGCs and strains from GCF objects.

Parameters:
    gcfs (Sequence[GCF]): A list of GCF objects. [required]

Returns:
    tuple[list[BGC], StrainCollection]: A tuple of two objects,
        - the first is a list of MIBiG BGC objects used in the GCFs;
        - the second is a StrainCollection object that contains all Strain objects used
          in the GCFs.

Source code in src/nplinker/genomics/utils.py
def get_mibig_from_gcf(gcfs: Sequence[GCF]) -> tuple[list[BGC], StrainCollection]:
+    """Get MIBiG BGCs and strains from GCF objects.
+
+    Args:
+        gcfs: A list of GCF objects.
+
+    Returns:
+        A tuple of two objects,
+
+            - the first is a list of MIBiG BGC objects used in the GCFs;
+            - the second is a StrainCollection object that contains all Strain objects used in the
+            GCFs.
+    """
+    mibig_bgcs_in_use = []
+    mibig_strains_in_use = StrainCollection()
+    for gcf in gcfs:
+        for bgc in gcf.bgcs:
+            if bgc.is_mibig():
+                mibig_bgcs_in_use.append(bgc)
+                if bgc.strain is not None:
+                    mibig_strains_in_use.add(bgc.strain)
+    return mibig_bgcs_in_use, mibig_strains_in_use
+
+
+
extract_mappings_strain_id_original_genome_id
extract_mappings_strain_id_original_genome_id(
+    podp_project_json_file: str | PathLike,
+) -> dict[str, set[str]]

Extract mappings "strain id <-> original genome id".

Parameters:
    podp_project_json_file (str | PathLike): The path to the PODP project JSON file. [required]

Returns:
    dict[str, set[str]]: Key is strain id and value is a set of original genome ids.

Notes:
    The podp_project_json_file is the project JSON file downloaded from the
    PODP platform. For example, for project MSV000079284, its JSON file is
    https://pairedomicsdata.bioinformatics.nl/api/projects/4b29ddc3-26d0-40d7-80c5-44fb6631dbf9.4.

Source code in src/nplinker/genomics/utils.py
def extract_mappings_strain_id_original_genome_id(
+    podp_project_json_file: str | PathLike,
+) -> dict[str, set[str]]:
+    """Extract mappings "strain id <-> original genome id".
+
+    Args:
+        podp_project_json_file: The path to the PODP project
+            JSON file.
+
+    Returns:
+        Key is strain id and value is a set of original genome ids.
+
+    Notes:
+        The `podp_project_json_file` is the project JSON file downloaded from
+        PODP platform. For example, for project MSV000079284, its json file is
+        https://pairedomicsdata.bioinformatics.nl/api/projects/4b29ddc3-26d0-40d7-80c5-44fb6631dbf9.4.
+    """
+    mappings_dict: dict[str, set[str]] = {}
+    with open(podp_project_json_file, "r") as f:
+        json_data = json.load(f)
+
+    validate_podp_json(json_data)
+
+    for record in json_data["genomes"]:
+        strain_id = record["genome_label"]
+        genome_id = get_best_available_genome_id(record["genome_ID"])
+        if genome_id is None:
+            logger.warning("Failed to extract genome ID from genome with label %s", strain_id)
+            continue
+        if strain_id in mappings_dict:
+            mappings_dict[strain_id].add(genome_id)
+        else:
+            mappings_dict[strain_id] = {genome_id}
+    return mappings_dict
+
+
+
extract_mappings_original_genome_id_resolved_genome_id
extract_mappings_original_genome_id_resolved_genome_id(
+    genome_status_json_file: str | PathLike,
+) -> dict[str, str]

Extract mappings "original_genome_id <-> resolved_genome_id".

Parameters:
    genome_status_json_file (str | PathLike): The path to the genome status JSON file. [required]

Returns:
    dict[str, str]: Key is original genome id and value is resolved genome id.

Notes:
    The genome_status_json_file is usually generated by the
    podp_download_and_extract_antismash_data function with a default file name
    defined in nplinker.defaults.GENOME_STATUS_FILENAME.

Source code in src/nplinker/genomics/utils.py
def extract_mappings_original_genome_id_resolved_genome_id(
+    genome_status_json_file: str | PathLike,
+) -> dict[str, str]:
+    """Extract mappings "original_genome_id <-> resolved_genome_id".
+
+    Args:
+        genome_status_json_file: The path to the genome status
+            JSON file.
+
+    Returns:
+        Key is original genome id and value is resolved genome id.
+
+    Notes:
+        The `genome_status_json_file` is usually generated by the
+        `podp_download_and_extract_antismash_data` function with
+        a default file name defined in `nplinker.defaults.GENOME_STATUS_FILENAME`.
+    """
+    gs_mappings_dict = GenomeStatus.read_json(genome_status_json_file)
+    return {gs.original_id: gs.resolved_refseq_id for gs in gs_mappings_dict.values()}
+
+
+
extract_mappings_resolved_genome_id_bgc_id
extract_mappings_resolved_genome_id_bgc_id(
+    genome_bgc_mappings_file: str | PathLike,
+) -> dict[str, set[str]]

Extract mappings "resolved_genome_id <-> bgc_id".

Parameters:
    genome_bgc_mappings_file (str | PathLike): The path to the genome BGC mappings JSON file. [required]

Returns:
    dict[str, set[str]]: Key is resolved genome id and value is a set of BGC ids.

Notes:
    The genome_bgc_mappings_file is usually generated by the
    generate_mappings_genome_id_bgc_id function with a default file name
    defined in nplinker.defaults.GENOME_BGC_MAPPINGS_FILENAME.

Source code in src/nplinker/genomics/utils.py
def extract_mappings_resolved_genome_id_bgc_id(
+    genome_bgc_mappings_file: str | PathLike,
+) -> dict[str, set[str]]:
+    """Extract mappings "resolved_genome_id <-> bgc_id".
+
+    Args:
+        genome_bgc_mappings_file: The path to the genome BGC
+            mappings JSON file.
+
+    Returns:
+        Key is resolved genome id and value is a set of BGC ids.
+
+    Notes:
+        The `genome_bgc_mappings_file` is usually generated by the
+        `generate_mappings_genome_id_bgc_id` function with a default file name
+        defined in `nplinker.defaults.GENOME_BGC_MAPPINGS_FILENAME`.
+    """
+    with open(genome_bgc_mappings_file, "r") as f:
+        json_data = json.load(f)
+
+    # validate the JSON data
+    validate(json_data, GENOME_BGC_MAPPINGS_SCHEMA)
+
+    return {mapping["genome_ID"]: set(mapping["BGC_ID"]) for mapping in json_data["mappings"]}
+
+
+
get_mappings_strain_id_bgc_id
get_mappings_strain_id_bgc_id(
+    mappings_strain_id_original_genome_id: Mapping[
+        str, set[str]
+    ],
+    mappings_original_genome_id_resolved_genome_id: Mapping[
+        str, str
+    ],
+    mappings_resolved_genome_id_bgc_id: Mapping[
+        str, set[str]
+    ],
+) -> dict[str, set[str]]

Get mappings "strain_id <-> bgc_id".

Parameters:
    mappings_strain_id_original_genome_id (Mapping[str, set[str]]): Mappings
        "strain_id <-> original_genome_id". [required]
    mappings_original_genome_id_resolved_genome_id (Mapping[str, str]): Mappings
        "original_genome_id <-> resolved_genome_id". [required]
    mappings_resolved_genome_id_bgc_id (Mapping[str, set[str]]): Mappings
        "resolved_genome_id <-> bgc_id". [required]

Returns:
    dict[str, set[str]]: Key is strain id and value is a set of BGC ids.

See Also:
    - extract_mappings_strain_id_original_genome_id: Extract mappings
      "strain_id <-> original_genome_id".
    - extract_mappings_original_genome_id_resolved_genome_id: Extract mappings
      "original_genome_id <-> resolved_genome_id".
    - extract_mappings_resolved_genome_id_bgc_id: Extract mappings
      "resolved_genome_id <-> bgc_id".

Source code in src/nplinker/genomics/utils.py
def get_mappings_strain_id_bgc_id(
+    mappings_strain_id_original_genome_id: Mapping[str, set[str]],
+    mappings_original_genome_id_resolved_genome_id: Mapping[str, str],
+    mappings_resolved_genome_id_bgc_id: Mapping[str, set[str]],
+) -> dict[str, set[str]]:
+    """Get mappings "strain_id <-> bgc_id".
+
+    Args:
+        mappings_strain_id_original_genome_id: Mappings
+            "strain_id <-> original_genome_id".
+        mappings_original_genome_id_resolved_genome_id: Mappings
+            "original_genome_id <-> resolved_genome_id".
+        mappings_resolved_genome_id_bgc_id: Mappings
+            "resolved_genome_id <-> bgc_id".
+
+    Returns:
+        Key is strain id and value is a set of BGC ids.
+
+    See Also:
+        - `extract_mappings_strain_id_original_genome_id`: Extract mappings
+            "strain_id <-> original_genome_id".
+        - `extract_mappings_original_genome_id_resolved_genome_id`: Extract mappings
+            "original_genome_id <-> resolved_genome_id".
+        - `extract_mappings_resolved_genome_id_bgc_id`: Extract mappings
+            "resolved_genome_id <-> bgc_id".
+    """
+    mappings_dict = {}
+    for strain_id, original_genome_ids in mappings_strain_id_original_genome_id.items():
+        bgc_ids = set()
+        for original_genome_id in original_genome_ids:
+            resolved_genome_id = mappings_original_genome_id_resolved_genome_id[original_genome_id]
+            if (bgc_id := mappings_resolved_genome_id_bgc_id.get(resolved_genome_id)) is not None:
+                bgc_ids.update(bgc_id)
+        if bgc_ids:
+            mappings_dict[strain_id] = bgc_ids
+    return mappings_dict
+
+
+
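The three extract_* helpers feed get_mappings_strain_id_bgc_id; a hedged sketch of the full chain (the file names are illustrative and follow the defaults mentioned in the Notes above):

strain_to_genome = extract_mappings_strain_id_original_genome_id("podp_project.json")
genome_to_resolved = extract_mappings_original_genome_id_resolved_genome_id("genome_status.json")
resolved_to_bgc = extract_mappings_resolved_genome_id_bgc_id("genome_bgc_mappings.json")

strain_to_bgc = get_mappings_strain_id_bgc_id(
    strain_to_genome, genome_to_resolved, resolved_to_bgc
)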
diff --git a/2.0.0a3/api/gnps/index.html b/2.0.0a3/api/gnps/index.html
new file mode 100644
index 00000000..5bbfcaa4
--- /dev/null
+++ b/2.0.0a3/api/gnps/index.html
@@ -0,0 +1,4595 @@

GNPS - NPLinker
GNPS

gnps

GNPSFormat

Bases: Enum

Enum class for GNPS format (workflow).

The GNPS format refers to the GNPS workflow. The name of the enum is a
simple short name for the workflow, and the value of the enum is the actual
name of the workflow on the GNPS website.

SNETS (class attribute):
    SNETS = 'METABOLOMICS-SNETS'

SNETSV2 (class attribute):
    SNETSV2 = 'METABOLOMICS-SNETS-V2'

FBMN (class attribute):
    FBMN = 'FEATURE-BASED-MOLECULAR-NETWORKING'

Unknown (class attribute):
    Unknown = 'Unknown-GNPS-Workflow'
GNPSDownloader
GNPSDownloader(task_id: str, download_root: str | PathLike)

Download GNPS zip archive for the given task id.

+

Note that only GNPS workflows listed in the GNPSFormat enum are supported.

Attributes:
    GNPS_DATA_DOWNLOAD_URL (str): URL template for downloading GNPS data.
    GNPS_DATA_DOWNLOAD_URL_FBMN (str): URL template for downloading GNPS data for FBMN.

Parameters:
    task_id (str): GNPS task id, identifying the data to be downloaded. [required]
    download_root (str | PathLike): Path where to store the downloaded archive. [required]

Raises:
    ValueError: If the given task id does not correspond to a supported GNPS workflow.

Examples:
    >>> GNPSDownloader("c22f44b14a3d450eb836d607cb9521bb", "~/downloads")

Source code in src/nplinker/metabolomics/gnps/gnps_downloader.py
def __init__(self, task_id: str, download_root: str | PathLike):
+    """Initialize the GNPSDownloader.
+
+    Args:
+        task_id: GNPS task id, identifying the data to be downloaded.
+        download_root: Path where to store the downloaded archive.
+
+    Raises:
+        ValueError: If the given task id does not correspond to a supported
+            GNPS workflow.
+
+    Examples:
+        >>> GNPSDownloader("c22f44b14a3d450eb836d607cb9521bb", "~/downloads")
+    """
+    gnps_format = gnps_format_from_task_id(task_id)
+    if gnps_format == GNPSFormat.Unknown:
+        raise ValueError(
+            f"Unknown workflow type for GNPS task '{task_id}'."
+            f"Supported GNPS workflows are described in the GNPSFormat enum, "
+            f"including such as 'METABOLOMICS-SNETS', 'METABOLOMICS-SNETS-V2' "
+            f"and 'FEATURE-BASED-MOLECULAR-NETWORKING'."
+        )
+
+    self._task_id = task_id
+    self._download_root: Path = Path(download_root)
+    self._gnps_format = gnps_format
+    self._file_name = gnps_format.value + "-" + self._task_id + ".zip"
+
+
GNPS_DATA_DOWNLOAD_URL (class attribute):
    GNPS_DATA_DOWNLOAD_URL: str = (
        "https://gnps.ucsd.edu/ProteoSAFe/DownloadResult?task={}&view=download_clustered_spectra"
    )

GNPS_DATA_DOWNLOAD_URL_FBMN (class attribute):
    GNPS_DATA_DOWNLOAD_URL_FBMN: str = (
        "https://gnps.ucsd.edu/ProteoSAFe/DownloadResult?task={}&view=download_cytoscape_data"
    )

gnps_format (property):
    gnps_format: GNPSFormat
    Get the GNPS workflow type.
download

download() -> 'Self'

Execute the downloading process.

Note: GNPS data is downloaded using the POST method (an empty payload is OK).

Source code in src/nplinker/metabolomics/gnps/gnps_downloader.py
def download(self) -> "Self":
+    """Execute the downloading process.
+
+    Note: GNPS data is downloaded using the POST method (empty payload is OK).
+    """
+    download_url(
+        self.get_url(), self._download_root, filename=self._file_name, http_method="POST"
+    )
+    return self
get_download_file

get_download_file() -> str

Get the path to the zip file.

Returns:
    str: Download path as string.

Source code in src/nplinker/metabolomics/gnps/gnps_downloader.py
def get_download_file(self) -> str:
+    """Get the path to the zip file.
+
+    Returns:
+        Download path as string
+    """
+    return str(Path(self._download_root) / self._file_name)
get_task_id

get_task_id() -> str

Get the GNPS task id.

Returns:
    str: Task id as string.

Source code in src/nplinker/metabolomics/gnps/gnps_downloader.py
def get_task_id(self) -> str:
+    """Get the GNPS task id.
+
+    Returns:
+        Task id as string.
+    """
+    return self._task_id
get_url

get_url() -> str

Get the full URL linking to the GNPS data to be downloaded.

Returns:
    str: URL pointing to the GNPS data to be downloaded.

Source code in src/nplinker/metabolomics/gnps/gnps_downloader.py
def get_url(self) -> str:
+    """Get the full URL linking to GNPS data to be downloaded.
+
+    Returns:
+        URL pointing to the GNPS data to be downloaded.
+    """
+    if self.gnps_format == GNPSFormat.FBMN:
+        return GNPSDownloader.GNPS_DATA_DOWNLOAD_URL_FBMN.format(self._task_id)
+    return GNPSDownloader.GNPS_DATA_DOWNLOAD_URL.format(self._task_id)
+
+
+
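A minimal usage sketch chaining download and path lookup (the task id is the one from the Examples above; "downloads" is an illustrative directory):

downloader = GNPSDownloader("c22f44b14a3d450eb836d607cb9521bb", "downloads")
zip_file = downloader.download().get_download_file()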
GNPSExtractor
GNPSExtractor(
+    file: str | PathLike, extract_dir: str | PathLike
+)
Class to extract files from a GNPS molecular networking archive (.zip).

Four files are extracted and renamed to the following names:

- file_mappings(.tsv/.csv)
- spectra.mgf
- molecular_families.tsv
- annotations.tsv

The files to be extracted are selected based on the GNPS workflow type,
as described below (in the order of the files above):

1. METABOLOMICS-SNETS
    - clusterinfosummarygroup_attributes_withIDs_withcomponentID/*.tsv
    - METABOLOMICS-SNETS*.mgf
    - networkedges_selfloop/*.pairsinfo
    - result_specnets_DB/*.tsv
2. METABOLOMICS-SNETS-V2
    - clusterinfosummarygroup_attributes_withIDs_withcomponentID/*.clustersummary
    - METABOLOMICS-SNETS-V2*.mgf
    - networkedges_selfloop/*.selfloop
    - result_specnets_DB/*.tsv
3. FEATURE-BASED-MOLECULAR-NETWORKING
    - quantification_table/*.csv
    - spectra/*.mgf
    - networkedges_selfloop/*.selfloop
    - DB_result/*.tsv

Parameters:
    file (str | PathLike): The path to the GNPS zip file. [required]
    extract_dir (str | PathLike): Path to the directory where to extract the files. [required]

Raises:
    ValueError: If the given file is an invalid GNPS archive.

Examples:
    >>> gnps_extractor = GNPSExtractor("path/to/gnps_archive.zip", "path/to/extract_dir")
    >>> gnps_extractor.gnps_format
    <GNPSFormat.SNETS: 'METABOLOMICS-SNETS'>
    >>> gnps_extractor.extract_dir
    'path/to/extract_dir'

Source code in src/nplinker/metabolomics/gnps/gnps_extractor.py
def __init__(self, file: str | PathLike, extract_dir: str | PathLike):
+    """Initialize the GNPSExtractor.
+
+    Args:
+        file: The path to the GNPS zip file.
+        extract_dir: path to the directory where to extract the files to.
+
+    Raises:
+        ValueError: If the given file is an invalid GNPS archive.
+
+    Examples:
+        >>> gnps_extractor = GNPSExtractor("path/to/gnps_archive.zip", "path/to/extract_dir")
+        >>> gnps_extractor.gnps_format
+        <GNPSFormat.SNETS: 'METABOLOMICS-SNETS'>
+        >>> gnps_extractor.extract_dir
+        'path/to/extract_dir'
+    """
+    gnps_format = gnps_format_from_archive(file)
+    if gnps_format == GNPSFormat.Unknown:
+        raise ValueError(
+            f"Unknown workflow type for GNPS archive '{file}'."
+            f"Supported GNPS workflows are described in the GNPSFormat enum, "
+            f"including such as 'METABOLOMICS-SNETS', 'METABOLOMICS-SNETS-V2' "
+            f"and 'FEATURE-BASED-MOLECULAR-NETWORKING'."
+        )
+
+    self._file = Path(file)
+    self._extract_path = Path(extract_dir)
+    self._gnps_format = gnps_format
+    # the order of filenames matters
+    self._target_files = [
+        "file_mappings",
+        "spectra.mgf",
+        "molecular_families.tsv",
+        "annotations.tsv",
+    ]
+
+    self._extract()
+
+
gnps_format  (property)

gnps_format: GNPSFormat

Get the GNPS workflow type.

Returns:

  • GNPSFormat: GNPS workflow type.

extract_dir  (property)

extract_dir: str

Get the path to extract the files to.

Returns:

  • str: Path to extract the files to, as a string.

GNPSSpectrumLoader

GNPSSpectrumLoader(file: str | PathLike)

Bases: SpectrumLoaderBase

Class to load mass spectra from the given GNPS MGF file.

The MGF file is from the GNPS output archive; its location is described below for each GNPS workflow type:
  1. METABOLOMICS-SNETS
     • METABOLOMICS-SNETS*.mgf
  2. METABOLOMICS-SNETS-V2
     • METABOLOMICS-SNETS-V2*.mgf
  3. FEATURE-BASED-MOLECULAR-NETWORKING
     • spectra/*.mgf

Parameters:

  • file (str | PathLike): Path to the MGF file. [required]

Raises:

  • ValueError: If the file is not valid.

Examples:

>>> loader = GNPSSpectrumLoader("gnps_spectra.mgf")
>>> print(loader.spectra[0])

Source code in src/nplinker/metabolomics/gnps/gnps_spectrum_loader.py
def __init__(self, file: str | PathLike):
    """Initialize the GNPSSpectrumLoader.

    Args:
        file: Path to the MGF file.

    Raises:
        ValueError: If the file is not valid.

    Examples:
        >>> loader = GNPSSpectrumLoader("gnps_spectra.mgf")
        >>> print(loader.spectra[0])
    """
    self._file = str(file)
    self._spectra: list[Spectrum] = []

    self._validate()
    self._load()

spectra  (property)

spectra: list[Spectrum]

Get the list of Spectrum objects.

Returns:

  • list[Spectrum]: The loaded spectra as a list of Spectrum objects.

GNPSMolecularFamilyLoader

GNPSMolecularFamilyLoader(file: str | PathLike)

Bases: MolecularFamilyLoaderBase

Class to load molecular families from GNPS output file.

The molecular family file is from the GNPS output archive, as described below for each GNPS workflow type:

  1. METABOLOMICS-SNETS
     • networkedges_selfloop/*.pairsinfo
  2. METABOLOMICS-SNETS-V2
     • networkedges_selfloop/*.selfloop
  3. FEATURE-BASED-MOLECULAR-NETWORKING
     • networkedges_selfloop/*.selfloop

The "ComponentIndex" column in the GNPS molecular family file is treated as the family id. However, molecular families that have only one member (i.e. one spectrum), called singleton molecular families, all share the value "-1" in the "ComponentIndex" column. To make the family id unique, the spectrum id prefixed with "singleton-" is used as the family id of a singleton molecular family; for example, a singleton family containing only spectrum "42" gets the family id "singleton-42".


Parameters:

  • file (str | PathLike): Path to the GNPS molecular family file. [required]

Raises:

  • ValueError: If the file is not valid.

Examples:

>>> loader = GNPSMolecularFamilyLoader("gnps_molecular_families.tsv")
>>> print(loader.families)
[<MolecularFamily 1>, <MolecularFamily 2>, ...]
>>> print(loader.families[0].spectra_ids)
{'1', '3', '7', ...}

Source code in src/nplinker/metabolomics/gnps/gnps_molecular_family_loader.py
def __init__(self, file: str | PathLike):
    """Initialize the GNPSMolecularFamilyLoader.

    Args:
        file: Path to the GNPS molecular family file.

    Raises:
        ValueError: If the file is not valid.

    Examples:
        >>> loader = GNPSMolecularFamilyLoader("gnps_molecular_families.tsv")
        >>> print(loader.families)
        [<MolecularFamily 1>, <MolecularFamily 2>, ...]
        >>> print(loader.families[0].spectra_ids)
        {'1', '3', '7', ...}
    """
    self._mfs: list[MolecularFamily] = []
    self._file = file

    self._validate()
    self._load()

get_mfs

get_mfs(keep_singleton: bool = False) -> list[MolecularFamily]

Get MolecularFamily objects.

Parameters:

  • keep_singleton (bool): True to keep singleton molecular families. A singleton molecular family is a molecular family that contains only one spectrum. [default: False]

Returns:

  • list[MolecularFamily]: A list of MolecularFamily objects with their spectra ids.
Source code in src/nplinker/metabolomics/gnps/gnps_molecular_family_loader.py
def get_mfs(self, keep_singleton: bool = False) -> list[MolecularFamily]:
    """Get MolecularFamily objects.

    Args:
        keep_singleton: True to keep singleton molecular families. A
            singleton molecular family is a molecular family that contains
            only one spectrum.

    Returns:
        A list of MolecularFamily objects with their spectra ids.
    """
    mfs = self._mfs
    if not keep_singleton:
        mfs = [mf for mf in mfs if not mf.is_singleton()]
    return mfs
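
A brief sketch (hypothetical file path, not from the original docs) of how keep_singleton changes the result:

loader = GNPSMolecularFamilyLoader("gnps_molecular_families.tsv")

all_mfs = loader.get_mfs(keep_singleton=True)  # includes "singleton-*" families
mfs = loader.get_mfs(keep_singleton=False)     # default: singletons filtered out
print(len(all_mfs) - len(mfs), "singleton families filtered")
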

GNPSAnnotationLoader

GNPSAnnotationLoader(file: str | PathLike)

Bases: AnnotationLoaderBase

Load annotations from GNPS output file.

The annotation file is a .tsv file from the GNPS output archive, as described below for each GNPS workflow type:

  1. METABOLOMICS-SNETS
     • result_specnets_DB/*.tsv
  2. METABOLOMICS-SNETS-V2
     • result_specnets_DB/*.tsv
  3. FEATURE-BASED-MOLECULAR-NETWORKING
     • DB_result/*.tsv

Parameters:

  • file (str | PathLike): The GNPS annotation file. [required]

Examples:

>>> loader = GNPSAnnotationLoader("gnps_annotations.tsv")
>>> print(loader.annotations["100"])
{'#Scan#': '100',
'Adduct': 'M+H',
'CAS_Number': 'N/A',
'Charge': '1',
'Compound_Name': 'MLS002153841-01!Iobenguane sulfate',
'Compound_Source': 'NIH Pharmacologically Active Library',
'Data_Collector': 'VP/LMS',
'ExactMass': '274.992',
'INCHI': 'N/A',
'INCHI_AUX': 'N/A',
'Instrument': 'qTof',
'IonMode': 'Positive',
'Ion_Source': 'LC-ESI',
'LibMZ': '276.003',
'LibraryName': 'lib-00014.mgf',
'LibraryQualityString': 'Gold',
'Library_Class': '1',
'MQScore': '0.704152',
'MZErrorPPM': '405416',
'MassDiff': '111.896',
'Organism': 'GNPS-NIH-SMALLMOLECULEPHARMACOLOGICALLYACTIVE',
'PI': 'Dorrestein',
'Precursor_MZ': '276.003',
'Pubmed_ID': 'N/A',
'RT_Query': '795.979',
'SharedPeaks': '7',
'Smiles': 'NC(=N)NCc1cccc(I)c1.OS(=O)(=O)O',
'SpecCharge': '1',
'SpecMZ': '164.107',
'SpectrumFile': 'spectra/specs_ms.pklbin',
'SpectrumID': 'CCMSLIB00000086167',
'TIC_Query': '986.997',
'UpdateWorkflowName': 'UPDATE-SINGLE-ANNOTATED-GOLD',
'tags': ' ',
'png_url': 'https://metabolomics-usi.gnps2.org/png/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167',
'json_url': 'https://metabolomics-usi.gnps2.org/json/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167',
'svg_url': 'https://metabolomics-usi.gnps2.org/svg/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167',
'spectrum_url': 'https://metabolomics-usi.gnps2.org/spectrum/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167'}

Source code in src/nplinker/metabolomics/gnps/gnps_annotation_loader.py
def __init__(self, file: str | PathLike):
    """Initialize the GNPSAnnotationLoader.

    Args:
        file: The GNPS annotation file.

    Examples:
        >>> loader = GNPSAnnotationLoader("gnps_annotations.tsv")
        >>> print(loader.annotations["100"])
        {'#Scan#': '100',
        'Adduct': 'M+H',
        'CAS_Number': 'N/A',
        'Charge': '1',
        'Compound_Name': 'MLS002153841-01!Iobenguane sulfate',
        'Compound_Source': 'NIH Pharmacologically Active Library',
        'Data_Collector': 'VP/LMS',
        'ExactMass': '274.992',
        'INCHI': 'N/A',
        'INCHI_AUX': 'N/A',
        'Instrument': 'qTof',
        'IonMode': 'Positive',
        'Ion_Source': 'LC-ESI',
        'LibMZ': '276.003',
        'LibraryName': 'lib-00014.mgf',
        'LibraryQualityString': 'Gold',
        'Library_Class': '1',
        'MQScore': '0.704152',
        'MZErrorPPM': '405416',
        'MassDiff': '111.896',
        'Organism': 'GNPS-NIH-SMALLMOLECULEPHARMACOLOGICALLYACTIVE',
        'PI': 'Dorrestein',
        'Precursor_MZ': '276.003',
        'Pubmed_ID': 'N/A',
        'RT_Query': '795.979',
        'SharedPeaks': '7',
        'Smiles': 'NC(=N)NCc1cccc(I)c1.OS(=O)(=O)O',
        'SpecCharge': '1',
        'SpecMZ': '164.107',
        'SpectrumFile': 'spectra/specs_ms.pklbin',
        'SpectrumID': 'CCMSLIB00000086167',
        'TIC_Query': '986.997',
        'UpdateWorkflowName': 'UPDATE-SINGLE-ANNOTATED-GOLD',
        'tags': ' ',
        'png_url': 'https://metabolomics-usi.gnps2.org/png/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167',
        'json_url': 'https://metabolomics-usi.gnps2.org/json/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167',
        'svg_url': 'https://metabolomics-usi.gnps2.org/svg/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167',
        'spectrum_url': 'https://metabolomics-usi.gnps2.org/spectrum/?usi1=mzspec:GNPS:GNPS-LIBRARY:accession:CCMSLIB00000086167'}
    """
    self._file = Path(file)
    self._annotations: dict[str, dict] = {}

    self._validate()
    self._load()
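
As a quick sketch (hypothetical file path and spectrum id), individual annotation fields can be read from the returned dict:

loader = GNPSAnnotationLoader("gnps_annotations.tsv")
annotation = loader.annotations["100"]
print(annotation["Compound_Name"], annotation["MQScore"])
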

annotations  (property)

annotations: dict[str, dict]

Get annotations.

Returns:

  • dict[str, dict]: Keys are spectrum ids ("#Scan#" in the annotation file) and values are the annotation dicts for each spectrum.

GNPSFileMappingLoader

GNPSFileMappingLoader(file: str | PathLike)

Bases: FileMappingLoaderBase

Class to load file mappings from GNPS output file.

File mappings refer to the mapping from a spectrum id to the files in which this spectrum occurs.

The file mappings file is from the GNPS output archive, as described below for each GNPS workflow type:

  1. METABOLOMICS-SNETS
     • clusterinfosummarygroup_attributes_withIDs_withcomponentID/*.tsv
  2. METABOLOMICS-SNETS-V2
     • clusterinfosummarygroup_attributes_withIDs_withcomponentID/*.clustersummary
  3. FEATURE-BASED-MOLECULAR-NETWORKING
     • quantification_table/*.csv

Parameters:

  • file (str | PathLike): Path to the GNPS file mappings file. [required]

Raises:

  • ValueError: If the file is not valid.

Examples:

>>> loader = GNPSFileMappingLoader("gnps_file_mappings.tsv")
>>> print(loader.mappings["1"])
['26c.mzXML']
>>> print(loader.mapping_reversed["26c.mzXML"])
{'1', '3', '7', ...}

Source code in src/nplinker/metabolomics/gnps/gnps_file_mapping_loader.py
def __init__(self, file: str | PathLike):
    """Initialize the GNPSFileMappingLoader.

    Args:
        file: Path to the GNPS file mappings file.

    Raises:
        ValueError: If the file is not valid.

    Examples:
        >>> loader = GNPSFileMappingLoader("gnps_file_mappings.tsv")
        >>> print(loader.mappings["1"])
        ['26c.mzXML']
        >>> print(loader.mapping_reversed["26c.mzXML"])
        {'1', '3', '7', ...}
    """
    self._gnps_format = gnps_format_from_file_mapping(file)
    if self._gnps_format is GNPSFormat.Unknown:
        raise ValueError("Unknown workflow type for GNPS file mappings file.")

    self._file = Path(file)
    self._mapping: dict[str, list[str]] = {}

    self._validate()
    self._load()

mappings  (property)

mappings: dict[str, list[str]]

Return mapping from spectrum id to files in which this spectrum occurs.

Returns:

  • dict[str, list[str]]: Mapping from spectrum id to names of all files in which this spectrum occurs.

mapping_reversed  (property)

mapping_reversed: dict[str, set[str]]

Return mapping from file name to all spectra that occur in this file.

Returns:

  • dict[str, set[str]]: Mapping from file name to ids of all spectra that occur in this file.
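
The two properties are complementary views of the same table. A minimal sketch (hypothetical file path, not from the original docs):

loader = GNPSFileMappingLoader("gnps_file_mappings.tsv")

# forward view: spectrum id -> list of MS filenames
print(loader.mappings["1"])                  # e.g. ['26c.mzXML']

# reversed view: MS filename -> set of spectrum ids
print(loader.mapping_reversed["26c.mzXML"])  # e.g. {'1', '3', '7', ...}
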

gnps_format_from_archive

gnps_format_from_archive(zip_file: str | PathLike) -> GNPSFormat

Detect GNPS format from a downloaded GNPS zip archive.

The detection is based on the filename of the zip file and the names of the files contained in the zip file.

Parameters:

  • zip_file (str | PathLike): Path to the downloaded GNPS zip file. [required]

Returns:

  • GNPSFormat: The format identified in the GNPS zip file.

Examples:

>>> gnps_format_from_archive("downloads/ProteoSAFe-METABOLOMICS-SNETS-c22f44b1-download_clustered_spectra.zip") == GNPSFormat.SNETS
>>> gnps_format_from_archive("downloads/ProteoSAFe-METABOLOMICS-SNETS-V2-189e8bf1-download_clustered_spectra.zip") == GNPSFormat.SNETSV2
>>> gnps_format_from_archive("downloads/ProteoSAFe-FEATURE-BASED-MOLECULAR-NETWORKING-672d0a53-download_cytoscape_data.zip") == GNPSFormat.FBMN

Source code in src/nplinker/metabolomics/gnps/gnps_format.py
def gnps_format_from_archive(zip_file: str | PathLike) -> GNPSFormat:
    """Detect GNPS format from a downloaded GNPS zip archive.

    The detection is based on the filename of the zip file and the names of the
    files contained in the zip file.

    Args:
        zip_file: Path to the downloaded GNPS zip file.

    Returns:
        The format identified in the GNPS zip file.

    Examples:
        >>> gnps_format_from_archive("downloads/ProteoSAFe-METABOLOMICS-SNETS-c22f44b1-download_clustered_spectra.zip") == GNPSFormat.SNETS
        >>> gnps_format_from_archive("downloads/ProteoSAFe-METABOLOMICS-SNETS-V2-189e8bf1-download_clustered_spectra.zip") == GNPSFormat.SNETSV2
        >>> gnps_format_from_archive("downloads/ProteoSAFe-FEATURE-BASED-MOLECULAR-NETWORKING-672d0a53-download_cytoscape_data.zip") == GNPSFormat.FBMN
    """
    file = Path(zip_file)
    # Guess the format from the filename of the zip file
    if GNPSFormat.FBMN.value in file.name:
        return GNPSFormat.FBMN
    # the order of the if statements matters for the following two
    if GNPSFormat.SNETSV2.value in file.name:
        return GNPSFormat.SNETSV2
    if GNPSFormat.SNETS.value in file.name:
        return GNPSFormat.SNETS

    # Guess the format from the names of the files in the zip file
    with zipfile.ZipFile(file) as archive:
        filenames = archive.namelist()
    if any(GNPSFormat.FBMN.value in x for x in filenames):
        return GNPSFormat.FBMN
    # the order of the if statements matters for the following two
    if any(GNPSFormat.SNETSV2.value in x for x in filenames):
        return GNPSFormat.SNETSV2
    if any(GNPSFormat.SNETS.value in x for x in filenames):
        return GNPSFormat.SNETS

    return GNPSFormat.Unknown

gnps_format_from_file_mapping

gnps_format_from_file_mapping(file: str | PathLike) -> GNPSFormat

Detect GNPS format from the given file mapping file.

The GNPS file mapping file is located in different folders depending on the GNPS workflow. Here are the locations in the corresponding GNPS zip archives:

  • METABOLOMICS-SNETS workflow: the .tsv file under folder "clusterinfosummarygroup_attributes_withIDs_withcomponentID"
  • METABOLOMICS-SNETS-V2 workflow: the .clustersummary file (tsv) under folder "clusterinfosummarygroup_attributes_withIDs_withcomponentID"
  • FEATURE-BASED-MOLECULAR-NETWORKING workflow: the .csv file under folder "quantification_table"

Parameters:

  • file (str | PathLike): Path to the file to detect the format from. [required]

Returns:

  • GNPSFormat: GNPS format identified in the file.
Source code in src/nplinker/metabolomics/gnps/gnps_format.py
def gnps_format_from_file_mapping(file: str | PathLike) -> GNPSFormat:
    """Detect GNPS format from the given file mapping file.

    The GNPS file mapping file is located in different folders depending on the
    GNPS workflow. Here are the locations in corresponding GNPS zip archives:

    - METABOLOMICS-SNETS workflow: the .tsv file under folder "clusterinfosummarygroup_attributes_withIDs_withcomponentID"
    - METABOLOMICS-SNETS-V2 workflow: the .clustersummary file (tsv) under folder "clusterinfosummarygroup_attributes_withIDs_withcomponentID"
    - FEATURE-BASED-MOLECULAR-NETWORKING workflow: the .csv file under folder "quantification_table"

    Args:
        file: Path to the file to detect the format from.

    Returns:
        GNPS format identified in the file.
    """
    with open(file, "r") as f:
        header = f.readline().strip()

    if re.search(r"\bAllFiles\b", header):
        return GNPSFormat.SNETS
    if re.search(r"\bUniqueFileSources\b", header):
        return GNPSFormat.SNETSV2
    if re.search(r"\b{}\b".format(re.escape("row ID")), header):
        return GNPSFormat.FBMN
    return GNPSFormat.Unknown
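
The header tokens checked above map one-to-one onto the workflow types. A minimal sketch (hypothetical file path) of acting on the detected format, assuming GNPSFormat and gnps_format_from_file_mapping are imported from nplinker.metabolomics.gnps.gnps_format:

fmt = gnps_format_from_file_mapping("gnps_file_mappings.tsv")
if fmt is GNPSFormat.SNETS:
    print("header contains 'AllFiles' -> METABOLOMICS-SNETS")
elif fmt is GNPSFormat.SNETSV2:
    print("header contains 'UniqueFileSources' -> METABOLOMICS-SNETS-V2")
elif fmt is GNPSFormat.FBMN:
    print("header contains 'row ID' -> FEATURE-BASED-MOLECULAR-NETWORKING")
else:
    print("unknown file mapping format")
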

gnps_format_from_task_id

gnps_format_from_task_id(task_id: str) -> GNPSFormat

Detect GNPS format for the given task id.

Parameters:

  • task_id (str): GNPS task id. [required]

Returns:

  • GNPSFormat: The format identified in the GNPS task.

Examples:

>>> gnps_format_from_task_id("c22f44b14a3d450eb836d607cb9521bb") == GNPSFormat.SNETS
>>> gnps_format_from_task_id("189e8bf16af145758b0a900f1c44ff4a") == GNPSFormat.SNETSV2
>>> gnps_format_from_task_id("92036537c21b44c29e509291e53f6382") == GNPSFormat.FBMN
>>> gnps_format_from_task_id("0ad6535e34d449788f297e712f43068a") == GNPSFormat.Unknown

Source code in src/nplinker/metabolomics/gnps/gnps_format.py
def gnps_format_from_task_id(task_id: str) -> GNPSFormat:
    """Detect GNPS format for the given task id.

    Args:
        task_id: GNPS task id.

    Returns:
        The format identified in the GNPS task.

    Examples:
        >>> gnps_format_from_task_id("c22f44b14a3d450eb836d607cb9521bb") == GNPSFormat.SNETS
        >>> gnps_format_from_task_id("189e8bf16af145758b0a900f1c44ff4a") == GNPSFormat.SNETSV2
        >>> gnps_format_from_task_id("92036537c21b44c29e509291e53f6382") == GNPSFormat.FBMN
        >>> gnps_format_from_task_id("0ad6535e34d449788f297e712f43068a") == GNPSFormat.Unknown
    """
    task_html = httpx.get(GNPS_TASK_URL.format(task_id))
    soup = BeautifulSoup(task_html.text, features="html.parser")
    try:
        # find the td tag that follows the th tag containing 'Workflow'
        workflow_tag = soup.find("th", string="Workflow").find_next_sibling("td")  # type: ignore
        workflow_format = workflow_tag.contents[0].strip()  # type: ignore
    except AttributeError:
        return GNPSFormat.Unknown

    if workflow_format == GNPSFormat.FBMN.value:
        return GNPSFormat.FBMN
    if workflow_format == GNPSFormat.SNETSV2.value:
        return GNPSFormat.SNETSV2
    if workflow_format == GNPSFormat.SNETS.value:
        return GNPSFormat.SNETS
    return GNPSFormat.Unknown

Dataset Loader

loader

logger  (module-attribute)

logger = getLogger(__name__)

DatasetLoader

DatasetLoader(config: Dynaconf)

Class to load all data.

Attributes:

  • config: A Dynaconf object that contains the configuration settings. Check the nplinker.config module for more information.
  • bgcs (list[BGC]): A list of BGC objects.
  • gcfs (list[GCF]): A list of GCF objects.
  • spectra (list[Spectrum]): A list of Spectrum objects.
  • mfs (list[MolecularFamily]): A list of MolecularFamily objects.
  • mibig_bgcs (list[BGC]): A list of MIBiG BGC objects.
  • mibig_strains_in_use (StrainCollection): A StrainCollection object that contains the strains in use from MIBiG.
  • product_types (list): A list of product types.
  • strains (StrainCollection): A StrainCollection object that contains all strains.
  • class_matches: A ClassMatches object that contains class match info.
  • chem_classes: A ChemClassPredictions object that contains chemical class predictions.

Parameters:

  • config (Dynaconf): A Dynaconf object that contains the configuration settings. Check the nplinker.config module for more information. [required]
Source code in src/nplinker/loader.py
def __init__(self, config: Dynaconf):
    """Initialize the DatasetLoader.

    Args:
        config: A Dynaconf object that contains the configuration settings. Check the
            `nplinker.config` module for more information.
    """
    self.config = config

    self.bgcs: list[BGC] = []
    self.gcfs: list[GCF] = []
    self.spectra: list[Spectrum] = []
    self.mfs: list[MolecularFamily] = []
    self.mibig_bgcs: list[BGC] = []
    self.mibig_strains_in_use: StrainCollection = StrainCollection()
    self.product_types: list = []
    self.strains: StrainCollection = StrainCollection()

    self.class_matches = None
    self.chem_classes = None

RUN_CANOPUS_DEFAULT  (class-attribute, instance-attribute)

RUN_CANOPUS_DEFAULT = False

EXTRA_CANOPUS_PARAMS_DEFAULT  (class-attribute, instance-attribute)

EXTRA_CANOPUS_PARAMS_DEFAULT = (
    "--maxmz 600 formula zodiac structure canopus"
)

OR_CANOPUS  (class-attribute, instance-attribute)

OR_CANOPUS = 'canopus_dir'

OR_MOLNETENHANCER  (class-attribute, instance-attribute)

OR_MOLNETENHANCER = 'molnetenhancer_dir'

config  (instance-attribute)

config = config

bgcs  (instance-attribute)

bgcs: list[BGC] = []

gcfs  (instance-attribute)

gcfs: list[GCF] = []

spectra  (instance-attribute)

spectra: list[Spectrum] = []

mfs  (instance-attribute)

mfs: list[MolecularFamily] = []

mibig_bgcs  (instance-attribute)

mibig_bgcs: list[BGC] = []

mibig_strains_in_use  (instance-attribute)

mibig_strains_in_use: StrainCollection = StrainCollection()

product_types  (instance-attribute)

product_types: list = []

strains  (instance-attribute)

strains: StrainCollection = StrainCollection()

class_matches  (instance-attribute)

class_matches = None

chem_classes  (instance-attribute)

chem_classes = None

load

load()

Load all data.
Source code in src/nplinker/loader.py
def load(self):
    """Load all data."""
    if not self._load_strain_mappings():
        return False

    if not self._load_metabolomics():
        return False

    if not self._load_genomics():
        return False

    # set self.strains with all strains from input plus mibig strains in use
    self.strains = self.strains + self.mibig_strains_in_use

    if len(self.strains) == 0:
        raise Exception("Failed to find *ANY* strains.")

    return True
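
A minimal driving sketch, not from the original docs; the config file name is hypothetical and a valid Dynaconf setup is assumed:

from dynaconf import Dynaconf
from nplinker.loader import DatasetLoader

config = Dynaconf(settings_files=["nplinker.toml"])  # hypothetical config file
loader = DatasetLoader(config)

if loader.load():
    print(f"{len(loader.bgcs)} BGCs, {len(loader.spectra)} spectra, "
          f"{len(loader.strains)} strains loaded")
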

Data Models

metabolomics

MolecularFamily

MolecularFamily(id: str)

Class to model molecular family.

Attributes:

  • id (str): Unique id for the molecular family.
  • spectra_ids (set[str]): Set of spectrum ids in the molecular family.

Parameters:

  • id (str): Unique id for the molecular family. [required]
Source code in src/nplinker/metabolomics/molecular_family.py
def __init__(self, id: str):
    """Initialize the MolecularFamily.

    Args:
        id: Unique id for the molecular family.
    """
    self.id: str = id
    self.spectra_ids: set[str] = set()
    self._spectra: set[Spectrum] = set()
    self._strains: StrainCollection = StrainCollection()

id  (instance-attribute)

id: str = id

spectra_ids  (instance-attribute)

spectra_ids: set[str] = set()

spectra  (property)

spectra: set[Spectrum]

Get Spectrum objects in the molecular family.

strains  (property)

strains: StrainCollection

Get strains in the molecular family.

add_spectrum

add_spectrum(spectrum: Spectrum) -> None

Add a Spectrum object to the molecular family.

Parameters:

  • spectrum (Spectrum): Spectrum object to add to the molecular family. [required]
Source code in src/nplinker/metabolomics/molecular_family.py
def add_spectrum(self, spectrum: Spectrum) -> None:
    """Add a Spectrum object to the molecular family.

    Args:
        spectrum: `Spectrum` object to add to the molecular family.
    """
    self._spectra.add(spectrum)
    self.spectra_ids.add(spectrum.id)
    self._strains = self._strains + spectrum.strains
    # add the molecular family to the spectrum
    spectrum.family = self

detach_spectrum

detach_spectrum(spectrum: Spectrum) -> None

Remove a Spectrum object from the molecular family.

Parameters:

  • spectrum (Spectrum): Spectrum object to remove from the molecular family. [required]
Source code in src/nplinker/metabolomics/molecular_family.py
def detach_spectrum(self, spectrum: Spectrum) -> None:
    """Remove a Spectrum object from the molecular family.

    Args:
        spectrum: `Spectrum` object to remove from the molecular family.
    """
    self._spectra.remove(spectrum)
    self.spectra_ids.remove(spectrum.id)
    self._strains = self._update_strains()
    # remove the molecular family from the spectrum
    spectrum.family = None

has_strain

has_strain(strain: Strain) -> bool

Check if the given strain exists.

Parameters:

  • strain (Strain): Strain object. [required]

Returns:

  • bool: True when the given strain exists.
Source code in src/nplinker/metabolomics/molecular_family.py
def has_strain(self, strain: Strain) -> bool:
    """Check if the given strain exists.

    Args:
        strain: `Strain` object.

    Returns:
        True when the given strain exists.
    """
    return strain in self._strains

is_singleton

is_singleton() -> bool

Check if the molecular family contains only one spectrum.

Returns:

  • bool: True when MolecularFamily.spectra_ids contains only one spectrum id.
Source code in src/nplinker/metabolomics/molecular_family.py
def is_singleton(self) -> bool:
    """Check if the molecular family contains only one spectrum.

    Returns:
        True when `MolecularFamily.spectra_ids` contains only one spectrum id.
    """
    return len(self.spectra_ids) == 1
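
A minimal sketch (hypothetical ids and values, not from the original docs) of linking a spectrum to a molecular family, assuming both classes are importable from nplinker.metabolomics:

from nplinker.metabolomics import MolecularFamily, Spectrum

mf = MolecularFamily("1")
spec = Spectrum(id="42", mz=[100.0, 200.0], intensity=[0.5, 1.0], precursor_mz=250.0)

mf.add_spectrum(spec)     # also sets spec.family = mf
print(mf.spectra_ids)     # {'42'}
print(mf.is_singleton())  # True: the family contains a single spectrum
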


Spectrum

Spectrum(
    id: str,
    mz: list[float],
    intensity: list[float],
    precursor_mz: float,
    rt: float = 0,
    metadata: dict | None = None,
)

Class to model MS/MS Spectrum.

Attributes:

  • id: the spectrum ID.
  • mz: the list of m/z values.
  • intensity: the list of intensity values.
  • precursor_mz: the m/z value of the precursor.
  • rt: the retention time in seconds.
  • metadata: the metadata of the spectrum, i.e. the header information in the MGF file.
  • gnps_annotations (dict): the GNPS annotations of the spectrum.
  • gnps_id (str | None): the GNPS ID of the spectrum.
  • strains (StrainCollection): the strains that this spectrum belongs to.
  • family (MolecularFamily | None): the molecular family that this spectrum belongs to.
  • peaks (ndarray): 2D array of peaks, each row is a peak of (m/z, intensity) values.

Parameters:

  • id (str): the spectrum ID. [required]
  • mz (list[float]): the list of m/z values. [required]
  • intensity (list[float]): the list of intensity values. [required]
  • precursor_mz (float): the precursor m/z. [required]
  • rt (float): the retention time in seconds. [default: 0]
  • metadata (dict | None): the metadata of the spectrum, i.e. the header information in the MGF file. [default: None]
Source code in src/nplinker/metabolomics/spectrum.py
def __init__(
    self,
    id: str,
    mz: list[float],
    intensity: list[float],
    precursor_mz: float,
    rt: float = 0,
    metadata: dict | None = None,
) -> None:
    """Initialize the Spectrum.

    Args:
        id: the spectrum ID.
        mz: the list of m/z values.
        intensity: the list of intensity values.
        precursor_mz: the precursor m/z.
        rt: the retention time in seconds. Defaults to 0.
        metadata: the metadata of the spectrum, i.e. the header information
            in the MGF file.
    """
    self.id = id
    self.mz = mz
    self.intensity = intensity
    self.precursor_mz = precursor_mz
    self.rt = rt
    self.metadata = metadata or {}

    self.gnps_annotations: dict = {}
    self.gnps_id: str | None = None
    self.strains: StrainCollection = StrainCollection()
    self.family: MolecularFamily | None = None
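
A small sketch (hypothetical values, not from the original docs) showing how the constructor arguments surface as attributes, including the derived peaks array:

spec = Spectrum(
    id="1",
    mz=[150.0, 200.0, 250.0],
    intensity=[0.1, 1.0, 0.4],
    precursor_mz=251.0,
    rt=12.3,
    metadata={"SCANS": "1"},
)
print(spec.peaks.shape)  # (3, 2): one (m/z, intensity) row per peak
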

id  (instance-attribute)

id = id

mz  (instance-attribute)

mz = mz

intensity  (instance-attribute)

intensity = intensity

precursor_mz  (instance-attribute)

precursor_mz = precursor_mz

rt  (instance-attribute)

rt = rt

metadata  (instance-attribute)

metadata = metadata or {}

gnps_annotations  (instance-attribute)

gnps_annotations: dict = {}

gnps_id  (instance-attribute)

gnps_id: str | None = None

strains  (instance-attribute)

strains: StrainCollection = StrainCollection()

family  (instance-attribute)

family: MolecularFamily | None = None

peaks  (cached property)

peaks: ndarray

Get the peaks, a 2D array with each row containing the values of (m/z, intensity).

has_strain

has_strain(strain: Strain) -> bool

Check if the given strain exists in the spectrum.

Parameters:

  • strain (Strain): Strain object. [required]

Returns:

  • bool: True when the given strain exists in the spectrum.
Source code in src/nplinker/metabolomics/spectrum.py
def has_strain(self, strain: Strain) -> bool:
    """Check if the given strain exists in the spectrum.

    Args:
        strain: `Strain` object.

    Returns:
        True when the given strain exists in the spectrum.
    """
    return strain in self.strains

Abstract Base Classes

abc

SpectrumLoaderBase

Bases: ABC

Abstract base class for SpectrumLoader.

spectra  (abstractmethod, property)

spectra: list[Spectrum]

Get Spectrum objects.

Returns:

  • list[Spectrum]: A sequence of Spectrum objects.

MolecularFamilyLoaderBase

Bases: ABC

Abstract base class for MolecularFamilyLoader.

get_mfs  (abstractmethod)

get_mfs(keep_singleton: bool) -> list[MolecularFamily]

Get MolecularFamily objects.

Parameters:

  • keep_singleton (bool): True to keep singleton molecular families. A singleton molecular family is a molecular family that contains only one spectrum. [required]

Returns:

  • list[MolecularFamily]: A sequence of MolecularFamily objects.
Source code in src/nplinker/metabolomics/abc.py
@abstractmethod
def get_mfs(self, keep_singleton: bool) -> list[MolecularFamily]:
    """Get MolecularFamily objects.

    Args:
        keep_singleton: True to keep singleton molecular families. A
            singleton molecular family is a molecular family that contains
            only one spectrum.

    Returns:
        A sequence of MolecularFamily objects.
    """
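
A minimal sketch (hypothetical class, e.g. for tests) of implementing this abstract base class; the concrete implementation shipped with NPLinker is GNPSMolecularFamilyLoader. Imports of MolecularFamily and the base class from nplinker.metabolomics are assumed.

from nplinker.metabolomics import MolecularFamily
from nplinker.metabolomics.abc import MolecularFamilyLoaderBase


class InMemoryMolecularFamilyLoader(MolecularFamilyLoaderBase):
    """Serve pre-built MolecularFamily objects from memory."""

    def __init__(self, mfs: list[MolecularFamily]):
        self._mfs = mfs

    def get_mfs(self, keep_singleton: bool) -> list[MolecularFamily]:
        if keep_singleton:
            return list(self._mfs)
        return [mf for mf in self._mfs if not mf.is_singleton()]
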

FileMappingLoaderBase

Bases: ABC

Abstract base class for FileMappingLoader.

mappings  (abstractmethod, property)

mappings: dict[str, list[str]]

Get file mappings.

Returns:

  • dict[str, list[str]]: A mapping from spectrum ID to the names of files where the spectrum occurs.

AnnotationLoaderBase

Bases: ABC

Abstract base class for AnnotationLoader.

annotations  (abstractmethod, property)

annotations: dict[str, dict]

Get annotations.

Returns:

  • dict[str, dict]: A mapping from spectrum ID to its annotations.


Utilities

utils

logger  (module-attribute)

logger = getLogger(__name__)

add_annotation_to_spectrum

add_annotation_to_spectrum(
    annotations: Mapping[str, dict],
    spectra: Sequence[Spectrum],
) -> None

Add GNPS annotations to the Spectrum.gnps_annotations attribute for input spectra.

It is possible that some spectra don't have annotations. Note that the input spectra list is changed in place.

Parameters:

  • annotations (Mapping[str, dict]): A dictionary of GNPS annotations, where the keys are spectrum ids and the values are GNPS annotations. [required]
  • spectra (Sequence[Spectrum]): A list of Spectrum objects. [required]
Source code in src/nplinker/metabolomics/utils.py
def add_annotation_to_spectrum(
    annotations: Mapping[str, dict], spectra: Sequence[Spectrum]
) -> None:
    """Add GNPS annotations to the `Spectrum.gnps_annotations` attribute for input spectra.

    It is possible that some spectra don't have annotations.
    Note that the input `spectra` list is changed in place.

    Args:
        annotations: A dictionary of GNPS annotations, where the keys are
            spectrum ids and the values are GNPS annotations.
        spectra: A list of Spectrum objects.
    """
    for spec in spectra:
        if spec.id in annotations:
            spec.gnps_annotations = annotations[spec.id]

add_strains_to_spectrum

add_strains_to_spectrum(
    strains: StrainCollection, spectra: Sequence[Spectrum]
) -> tuple[list[Spectrum], list[Spectrum]]

Add Strain objects to the Spectrum.strains attribute for input spectra.

Note that the input spectra list is changed in place.

Parameters:

  • strains (StrainCollection): A collection of strain objects. [required]
  • spectra (Sequence[Spectrum]): A list of Spectrum objects. [required]

Returns:

  • tuple[list[Spectrum], list[Spectrum]]: A tuple of two lists of Spectrum objects,
      - the first list contains Spectrum objects that are updated with Strain objects;
      - the second list contains Spectrum objects that are not updated with Strain objects because no Strain objects are found.
Source code in src/nplinker/metabolomics/utils.py
def add_strains_to_spectrum(
    strains: StrainCollection, spectra: Sequence[Spectrum]
) -> tuple[list[Spectrum], list[Spectrum]]:
    """Add `Strain` objects to the `Spectrum.strains` attribute for input spectra.

    Note that the input `spectra` list is changed in place.

    Args:
        strains: A collection of strain objects.
        spectra: A list of Spectrum objects.

    Returns:
        A tuple of two lists of Spectrum objects,

            - the first list contains Spectrum objects that are updated with Strain objects;
            - the second list contains Spectrum objects that are not updated with Strain objects
            because no Strain objects are found.
    """
    spectra_with_strains = []
    spectra_without_strains = []
    for spec in spectra:
        try:
            strain_list = strains.lookup(spec.id)
        except ValueError:
            spectra_without_strains.append(spec)
            continue

        for strain in strain_list:
            spec.strains.add(strain)
        spectra_with_strains.append(spec)

    logger.info(
        f"{len(spectra_with_strains)} Spectrum objects updated with Strain objects.\n"
        f"{len(spectra_without_strains)} Spectrum objects not updated with Strain objects."
    )

    return spectra_with_strains, spectra_without_strains

add_spectrum_to_mf

add_spectrum_to_mf(
    spectra: Sequence[Spectrum],
    mfs: Sequence[MolecularFamily],
) -> tuple[
    list[MolecularFamily],
    list[MolecularFamily],
    dict[MolecularFamily, set[str]],
]

Add Spectrum objects to MolecularFamily objects.

The spectra_ids attribute of a MolecularFamily object contains the ids of Spectrum objects. These ids are used to find Spectrum objects in the input spectra list. The found Spectrum objects are added to the spectra attribute of the MolecularFamily object. It is possible that some spectrum ids are not found in the input spectra list, in which case their Spectrum objects are missing from the MolecularFamily object.

Note that the input mfs list is changed in place.

Parameters:

  • spectra (Sequence[Spectrum]): A list of Spectrum objects. [required]
  • mfs (Sequence[MolecularFamily]): A list of MolecularFamily objects. [required]

Returns:

  • tuple[list[MolecularFamily], list[MolecularFamily], dict[MolecularFamily, set[str]]]: A tuple of three elements,
      - the first list contains MolecularFamily objects that are updated with Spectrum objects
      - the second list contains MolecularFamily objects that are not updated with Spectrum objects (all Spectrum objects are missing).
      - the third is a dictionary containing MolecularFamily objects as keys and a set of ids of missing Spectrum objects as values.
Source code in src/nplinker/metabolomics/utils.py
def add_spectrum_to_mf(
    spectra: Sequence[Spectrum], mfs: Sequence[MolecularFamily]
) -> tuple[list[MolecularFamily], list[MolecularFamily], dict[MolecularFamily, set[str]]]:
    """Add Spectrum objects to MolecularFamily objects.

    The attribute of `spectra_ids` of MolecularFamily object contains the ids of Spectrum objects.
    These ids are used to find Spectrum objects from the input `spectra` list. The found Spectrum
    objects are added to the `spectra` attribute of MolecularFamily object. It is possible that
    some spectrum ids are not found in the input `spectra` list, and so their Spectrum objects are
    missing in the MolecularFamily object.

    Note that the input `mfs` list is changed in place.

    Args:
        spectra: A list of Spectrum objects.
        mfs: A list of MolecularFamily objects.

    Returns:
        A tuple of three elements,

            - the first list contains MolecularFamily objects that are updated with Spectrum objects
            - the second list contains MolecularFamily objects that are not updated with Spectrum
            objects (all Spectrum objects are missing).
            - the third is a dictionary containing MolecularFamily objects as keys and a set of ids
            of missing Spectrum objects as values.
    """
    spec_dict = {spec.id: spec for spec in spectra}
    mf_with_spec = []
    mf_without_spec = []
    mf_missing_spec: dict[MolecularFamily, set[str]] = {}
    for mf in mfs:
        for spec_id in mf.spectra_ids:
            try:
                spec = spec_dict[spec_id]
            except KeyError:
                if mf not in mf_missing_spec:
                    mf_missing_spec[mf] = {spec_id}
                else:
                    mf_missing_spec[mf].add(spec_id)
                continue
            mf.add_spectrum(spec)

        if mf.spectra:
            mf_with_spec.append(mf)
        else:
            mf_without_spec.append(mf)

    logger.info(
        f"{len(mf_with_spec)} MolecularFamily objects updated with Spectrum objects.\n"
        f"{len(mf_without_spec)} MolecularFamily objects not updated with Spectrum objects.\n"
        f"{len(mf_missing_spec)} MolecularFamily objects have missing Spectrum objects."
    )
    return mf_with_spec, mf_without_spec, mf_missing_spec
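
A short sketch (not from the original docs) of inspecting the three returned values, assuming spectra and mfs are lists built by the loaders above:

mf_with_spec, mf_without_spec, mf_missing_spec = add_spectrum_to_mf(spectra, mfs)
for mf, missing_ids in mf_missing_spec.items():
    print(f"family {mf.id} is missing spectra: {sorted(missing_ids)}")
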

extract_mappings_strain_id_ms_filename

extract_mappings_strain_id_ms_filename(
    podp_project_json_file: str | PathLike,
) -> dict[str, set[str]]

Extract mappings "strain_id <-> MS_filename".

Parameters:

  • podp_project_json_file (str | PathLike): The path to the PODP project JSON file. [required]

Returns:

  • dict[str, set[str]]: Key is strain id and value is a set of MS filenames.

Notes:

The podp_project_json_file is the project JSON file downloaded from the PODP platform. For example, for project MSV000079284, its json file is https://pairedomicsdata.bioinformatics.nl/api/projects/4b29ddc3-26d0-40d7-80c5-44fb6631dbf9.4.
Source code in src/nplinker/metabolomics/utils.py
def extract_mappings_strain_id_ms_filename(
    podp_project_json_file: str | PathLike,
) -> dict[str, set[str]]:
    """Extract mappings "strain_id <-> MS_filename".

    Args:
        podp_project_json_file: The path to the PODP project
            JSON file.

    Returns:
        Key is strain id and value is a set of MS filenames.

    Notes:
        The `podp_project_json_file` is the project JSON file downloaded from
        PODP platform. For example, for project MSV000079284, its json file is
        https://pairedomicsdata.bioinformatics.nl/api/projects/4b29ddc3-26d0-40d7-80c5-44fb6631dbf9.4.
    """
    mappings_dict: dict[str, set[str]] = {}
    with open(podp_project_json_file, "r") as f:
        json_data = json.load(f)

    validate_podp_json(json_data)

    # Extract mappings strain id <-> metabolomics filename
    for record in json_data["genome_metabolome_links"]:
        strain_id = record["genome_label"]
        # get the actual filename of the mzXML URL
        filename = Path(record["metabolomics_file"]).name
        if strain_id in mappings_dict:
            mappings_dict[strain_id].add(filename)
        else:
            mappings_dict[strain_id] = {filename}
    return mappings_dict

extract_mappings_ms_filename_spectrum_id

extract_mappings_ms_filename_spectrum_id(
    gnps_file_mappings_file: str | PathLike,
) -> dict[str, set[str]]

Extract mappings "MS_filename <-> spectrum_id".

Parameters:

  • gnps_file_mappings_file (str | PathLike): The path to the GNPS file mappings file (csv or tsv). [required]

Returns:

  • dict[str, set[str]]: Key is MS filename and value is a set of spectrum ids.

Notes:

The gnps_file_mappings_file is generated by GNPS molecular networking. It's downloaded from the GNPS website to a file with a default name defined in GNPS_FILE_MAPPINGS_FILENAME.

See Also:

GNPSFileMappingLoader: A class to load the GNPS file mappings file.
Source code in src/nplinker/metabolomics/utils.py
def extract_mappings_ms_filename_spectrum_id(
    gnps_file_mappings_file: str | PathLike,
) -> dict[str, set[str]]:
    """Extract mappings "MS_filename <-> spectrum_id".

    Args:
        gnps_file_mappings_file: The path to the GNPS file mappings file (csv or
            tsv).

    Returns:
        Key is MS filename and value is a set of spectrum ids.

    Notes:
        The `gnps_file_mappings_file` is generated by GNPS molecular networking. It's downloaded
        from GNPS website to a file with a default name defined in `GNPS_FILE_MAPPINGS_FILENAME`.

    See Also:
        GNPSFileMappingLoader: A class to load GNPS file mappings file.
    """
    loader = GNPSFileMappingLoader(gnps_file_mappings_file)
    return loader.mapping_reversed

get_mappings_strain_id_spectrum_id

get_mappings_strain_id_spectrum_id(
    mappings_strain_id_ms_filename: Mapping[str, set[str]],
    mappings_ms_filename_spectrum_id: Mapping[str, set[str]],
) -> dict[str, set[str]]

Get mappings "strain_id <-> spectrum_id".

Parameters:

  • mappings_strain_id_ms_filename (Mapping[str, set[str]]): Mappings "strain_id <-> MS_filename". [required]
  • mappings_ms_filename_spectrum_id (Mapping[str, set[str]]): Mappings "MS_filename <-> spectrum_id". [required]

Returns:

  • dict[str, set[str]]: Key is strain id and value is a set of spectrum ids.

See Also:

  • extract_mappings_strain_id_ms_filename: Extract mappings "strain_id <-> MS_filename".
  • extract_mappings_ms_filename_spectrum_id: Extract mappings "MS_filename <-> spectrum_id".
Source code in src/nplinker/metabolomics/utils.py
def get_mappings_strain_id_spectrum_id(
    mappings_strain_id_ms_filename: Mapping[str, set[str]],
    mappings_ms_filename_spectrum_id: Mapping[str, set[str]],
) -> dict[str, set[str]]:
    """Get mappings "strain_id <-> spectrum_id".

    Args:
        mappings_strain_id_ms_filename: Mappings
            "strain_id <-> MS_filename".
        mappings_ms_filename_spectrum_id: Mappings
            "MS_filename <-> spectrum_id".

    Returns:
        Key is strain id and value is a set of spectrum ids.

    See Also:
        `extract_mappings_strain_id_ms_filename`: Extract mappings
            "strain_id <-> MS_filename".
        `extract_mappings_ms_filename_spectrum_id`: Extract mappings
            "MS_filename <-> spectrum_id".
    """
    mappings_dict = {}
    for strain_id, ms_filenames in mappings_strain_id_ms_filename.items():
        spectrum_ids = set()
        for ms_filename in ms_filenames:
            if (sid := mappings_ms_filename_spectrum_id.get(ms_filename)) is not None:
                spectrum_ids.update(sid)
        if spectrum_ids:
            mappings_dict[strain_id] = spectrum_ids
    return mappings_dict
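
The three helpers compose into a small pipeline. A sketch with hypothetical file paths, not from the original docs:

strain_to_files = extract_mappings_strain_id_ms_filename("podp_project.json")
file_to_spectra = extract_mappings_ms_filename_spectrum_id("gnps_file_mappings.tsv")
strain_to_spectra = get_mappings_strain_id_spectrum_id(strain_to_files, file_to_spectra)
print(len(strain_to_spectra), "strains mapped to spectrum ids")
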

MiBIG

mibig

MibigLoader

MibigLoader(data_dir: str | PathLike)

Bases: BGCLoaderBase

Parse MIBiG metadata files and return BGC objects.

A MIBiG metadata file (json) contains annotations/metadata information for each BGC. See https://mibig.secondarymetabolites.org/download.

The MiBIG accession is used as the BGC id and strain name. The loaded BGC objects have a Strain object as their strain attribute (i.e. BGC.strain).

Parameters:

  • data_dir (str | PathLike): Path to the directory of MIBiG metadata json files. [required]
Source code in src/nplinker/genomics/mibig/mibig_loader.py
def __init__(self, data_dir: str | PathLike):
    """Initialize the MIBiG metadata loader.

    Args:
        data_dir: Path to the directory of MIBiG metadata json files
    """
    self.data_dir = str(data_dir)
    self._file_dict = self.parse_data_dir(self.data_dir)
    self._metadata_dict = self._parse_metadata()
    self._bgcs = self._parse_bgcs()

data_dir  (instance-attribute)

data_dir = str(data_dir)

get_files

get_files() -> dict[str, str]

Get the path of all MIBiG metadata json files.

Returns:

  • dict[str, str]: The key is the metadata file name (BGC accession), and the value is the path to the metadata json file.
+ Source code in src/nplinker/genomics/mibig/mibig_loader.py +
def get_files(self) -> dict[str, str]:
    """Get the path of all MIBiG metadata json files.

    Returns:
        The key is metadata file name (BGC accession), and the value is path to the metadata
        json file
    """
    return self._file_dict
parse_data_dir staticmethod

parse_data_dir(data_dir: str | PathLike) -> dict[str, str]

Parse metadata directory and return paths to all metadata json files.

Parameters:

    data_dir (str | PathLike): path to the directory of MIBiG metadata json files. Required.

Returns:

    dict[str, str]: The key is the metadata file name (BGC accession), and the value is the path to the metadata json file.

Source code in src/nplinker/genomics/mibig/mibig_loader.py
@staticmethod
def parse_data_dir(data_dir: str | PathLike) -> dict[str, str]:
    """Parse metadata directory and return paths to all metadata json files.

    Args:
        data_dir: path to the directory of MIBiG metadata json files

    Returns:
        The key is metadata file name (BGC accession), and the value is path to the metadata
        json file
    """
    file_dict = {}
    json_files = list_files(data_dir, prefix="BGC", suffix=".json")
    for file in json_files:
        fname = Path(file).stem
        file_dict[fname] = file
    return file_dict
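For illustration, given a directory containing BGC0000001.json and BGC0000002.json (paths hypothetical), the returned dict maps file stems to paths:

>>> MibigLoader.parse_data_dir("/data/mibig_metadata")
{'BGC0000001': '/data/mibig_metadata/BGC0000001.json', 'BGC0000002': '/data/mibig_metadata/BGC0000002.json'}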
get_metadata

get_metadata() -> dict[str, MibigMetadata]

Get MibigMetadata objects.

Returns:

    dict[str, MibigMetadata]: The key is the BGC accession (file name) and the value is the MibigMetadata object.

Source code in src/nplinker/genomics/mibig/mibig_loader.py
def get_metadata(self) -> dict[str, MibigMetadata]:
    """Get MibigMetadata objects.

    Returns:
        The key is BGC accession (file name) and the value is MibigMetadata object
    """
    return self._metadata_dict
get_bgcs

get_bgcs() -> list[BGC]

Get BGC objects.

The BGC objects use the MiBIG accession as id and have a Strain object as their strain attribute (i.e. BGC.strain), where the name of the Strain object is also the MiBIG accession.
Returns:

    list[BGC]: A list of BGC objects.

Source code in src/nplinker/genomics/mibig/mibig_loader.py
def get_bgcs(self) -> list[BGC]:
    """Get BGC objects.

    The BGC objects use MiBIG accession as id and have Strain object as
    their strain attribute (i.e. `BGC.strain`), where the name of the Strain
    object is also MiBIG accession.

    Returns:
        A list of BGC objects
    """
    return self._bgcs

MibigMetadata

MibigMetadata(file: str | PathLike)

Class to model the BGC metadata/annotations defined in MIBiG.

MIBiG is a specification of BGC metadata and uses a JSON schema to represent BGC metadata. For more details see: https://mibig.secondarymetabolites.org/download.

Parameters:

    file (str | PathLike): Path to the json file of MIBiG BGC metadata. Required.

Examples:

>>> metadata = MibigMetadata("/data/BGC0000001.json")

Source code in src/nplinker/genomics/mibig/mibig_metadata.py
def __init__(self, file: str | PathLike) -> None:
    """Initialize the MIBiG metadata object.

    Args:
        file: Path to the json file of MIBiG BGC metadata

    Examples:
        >>> metadata = MibigMetadata("/data/BGC0000001.json")
    """
    self.file = str(file)
    with open(self.file, "rb") as f:
        self.metadata = json.load(f)

    self._mibig_accession: str
    self._biosyn_class: tuple[str]
    self._parse_metadata()
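Once constructed, the two properties documented below expose the parsed fields; a small sketch (the file path and the returned values are illustrative):

>>> metadata = MibigMetadata("/data/BGC0000001.json")
>>> metadata.mibig_accession
'BGC0000001'
>>> metadata.biosyn_class
('NRP',)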
file instance-attribute

    file = str(file)

metadata instance-attribute

    metadata = load(f)

mibig_accession property

    mibig_accession: str

Get the value of metadata item 'mibig_accession'.

biosyn_class property

    biosyn_class: tuple[str]

Get the value of metadata item 'biosyn_class'.

The 'biosyn_class' is the biosynthetic class(es), namely the type of natural product or secondary metabolite.

MIBiG defines 6 major biosynthetic classes, including "NRP", "Polyketide", "RiPP", "Terpene", "Saccharide" and "Alkaloid". Note that natural products created by all other biosynthetic mechanisms fall under the category "Other". For more details see the publication: https://doi.org/10.1186/s40793-018-0318-y.

download_and_extract_mibig_metadata

download_and_extract_mibig_metadata(
    download_root: str | PathLike,
    extract_path: str | PathLike,
    version: str = "3.1",
)

Download and extract MIBiG metadata json files.

Note that it does not matter whether the metadata json files are in nested folders or not in the archive; all json files will be extracted to the same location, i.e. extract_path. The nested folders will be removed if they exist, so the extract_path will contain only json files.
Parameters:

    download_root (str | PathLike): Path to the directory in which to place the downloaded archive. Required.
    extract_path (str | PathLike): Path to an empty directory where the json files will be extracted. The directory must be empty if it exists. If it doesn't exist, the directory will be created. Required.
    version (str): The version of the MIBiG metadata to download. Defaults to "3.1".

Examples:

>>> download_and_extract_mibig_metadata("/data/download", "/data/mibig_metadata")

Source code in src/nplinker/genomics/mibig/mibig_downloader.py
def download_and_extract_mibig_metadata(
    download_root: str | os.PathLike,
    extract_path: str | os.PathLike,
    version: str = "3.1",
):
    """Download and extract MIBiG metadata json files.

    Note that it does not matter whether the metadata json files are in nested folders or not in the archive,
    all json files will be extracted to the same location, i.e. `extract_path`. The nested
    folders will be removed if they exist. So the `extract_path` will have only json files.

    Args:
        download_root: Path to the directory in which to place the downloaded archive.
        extract_path: Path to an empty directory where the json files will be extracted.
            The directory must be empty if it exists. If it doesn't exist, the directory will be created.
        version: The version of the MIBiG metadata to download. Defaults to "3.1".

    Examples:
        >>> download_and_extract_mibig_metadata("/data/download", "/data/mibig_metadata")
    """
    download_root = Path(download_root)
    extract_path = Path(extract_path)

    if download_root == extract_path:
        raise ValueError("Identical path of download directory and extract directory")

    # check if extract_path is empty
    if not extract_path.exists():
        extract_path.mkdir(parents=True)
    else:
        if len(list(extract_path.iterdir())) != 0:
            raise ValueError(f'Nonempty directory: "{extract_path}"')

    # download and extract
    md5 = _MD5_MIBIG_METADATA[version]
    download_and_extract_archive(
        url=MIBIG_METADATA_URL.format(version=version),
        download_root=download_root,
        extract_root=extract_path,
        md5=md5,
    )

    # After extracting mibig archive, it's either one dir or many json files,
    # if it's a dir, then move all json files from it to extract_path
    subdirs = list_dirs(extract_path)
    if len(subdirs) > 1:
        raise ValueError(f"Expected one extracted directory, got {len(subdirs)}")

    if len(subdirs) == 1:
        subdir_path = subdirs[0]
        for fname in list_files(subdir_path, prefix="BGC", suffix=".json", keep_parent=False):
            shutil.move(os.path.join(subdir_path, fname), os.path.join(extract_path, fname))
        # delete subdir
        if subdir_path != extract_path:
            shutil.rmtree(subdir_path)
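Taken together with MibigLoader above, a plausible end-to-end flow looks like this sketch (the paths are illustrative):

>>> download_and_extract_mibig_metadata("/data/download", "/data/mibig_metadata", version="3.1")
>>> loader = MibigLoader("/data/mibig_metadata")
>>> bgcs = loader.get_bgcs()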

parse_bgc_metadata_json

parse_bgc_metadata_json(file: str | PathLike) -> BGC

Parse MIBiG metadata file and return BGC object.

Note that the MiBIG accession is used as the BGC id and strain name. The BGC object has a Strain object as its strain attribute.
Parameters:

    file (str | PathLike): Path to the MIBiG metadata json file. Required.

Returns:

    BGC: The BGC object.

Source code in src/nplinker/genomics/mibig/mibig_loader.py
def parse_bgc_metadata_json(file: str | PathLike) -> BGC:
    """Parse MIBiG metadata file and return BGC object.

    Note that the MiBIG accession is used as the BGC id and strain name. The BGC
    object has Strain object as its strain attribute.

    Args:
        file: Path to the MIBiG metadata json file

    Returns:
        BGC object
    """
    metadata = MibigMetadata(str(file))
    mibig_bgc = BGC(metadata.mibig_accession, *metadata.biosyn_class)
    mibig_bgc.mibig_bgc_class = metadata.biosyn_class
    mibig_bgc.strain = Strain(metadata.mibig_accession)
    return mibig_bgc
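A one-line usage sketch (the file path is illustrative):

>>> bgc = parse_bgc_metadata_json("/data/mibig_metadata/BGC0000001.json")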
\ No newline at end of file
diff --git a/2.0.0a3/api/nplinker/index.html b/2.0.0a3/api/nplinker/index.html
new file mode 100644
index 00000000..361517da
--- /dev/null
+++ b/2.0.0a3/api/nplinker/index.html
@@ -0,0 +1,4584 @@

NPLinker

nplinker

NPLinker

NPLinker(config_file: str | PathLike)

Main class for the NPLinker application.

Attributes:

    config: The configuration object for the current NPLinker application.
    root_dir (str): The path to the root directory of the current NPLinker application.
    output_dir (str): The path to the output directory of the current NPLinker application.
    bgcs (list[BGC]): A list of all BGC objects.
    gcfs (list[GCF]): A list of all GCF objects.
    spectra (list[Spectrum]): A list of all Spectrum objects.
    mfs (list[MolecularFamily]): A list of all MolecularFamily objects.
    mibig_bgcs (list[BGC]): A list of all MiBIG BGC objects.
    strains (StrainCollection): A StrainCollection object containing all Strain objects.
    product_types (list[str]): A list of all BiGSCAPE product types.
    scoring_methods (list[str]): A list of all valid scoring methods.

Examples:

To start a NPLinker application:

>>> from nplinker import NPLinker
>>> npl = NPLinker("path/to/config.toml")

To load all data into memory:

>>> npl.load_data()

To check the number of GCF objects:

>>> len(npl.gcfs)

To get the links for all GCF objects using the Metcalf scoring method (the result is a LinkGraph object):

>>> lg = npl.get_links(npl.gcfs, "metcalf")

To get the link data between two objects:

>>> link_data = lg.get_link_data(npl.gcfs[0], npl.spectra[0])
{"metcalf": Score("metcalf", 1.0, {"cutoff": 0, "standardised": False})}

Parameters:

    config_file (str | PathLike): Path to the configuration file to use. Required.

Source code in src/nplinker/nplinker.py
def __init__(self, config_file: str | PathLike):
    """Initialise an NPLinker instance.

    Args:
        config_file: Path to the configuration file to use.
    """
    # Load the configuration file
    self.config = load_config(config_file)

    # Setup logging for the application
    setup_logging(
        level=self.config.log.level,
        file=self.config.log.get("file", ""),
        use_console=self.config.log.use_console,
    )
    logger.info(
        "Configuration:\n %s", pformat(self.config.as_dict(), width=20, sort_dicts=False)
    )

    # Setup the output directory
    self._output_dir = self.config.root_dir / OUTPUT_DIRNAME
    self._output_dir.mkdir(exist_ok=True)

    # Initialise data containers that will be populated by the `load_data` method
    self._bgc_dict: dict[str, BGC] = {}
    self._gcf_dict: dict[str, GCF] = {}
    self._spec_dict: dict[str, Spectrum] = {}
    self._mf_dict: dict[str, MolecularFamily] = {}
    self._mibig_bgcs: list[BGC] = []
    self._strains: StrainCollection = StrainCollection()
    self._product_types: list = []
    self._chem_classes = None  # TODO: to be refactored
    self._class_matches = None  # TODO: to be refactored

    # Flags to keep track of whether the scoring methods have been set up
    self._scoring_methods_setup_done = {name: False for name in self._valid_scoring_methods}
config instance-attribute

    config = load_config(config_file)

root_dir property

    root_dir: str

Get the path to the root directory of the current NPLinker instance.

output_dir property

    output_dir: str

Get the path to the output directory of the current NPLinker instance.

bgcs property

    bgcs: list[BGC]

Get all BGC objects.

gcfs property

    gcfs: list[GCF]

Get all GCF objects.

spectra property

    spectra: list[Spectrum]

Get all Spectrum objects.

mfs property

    mfs: list[MolecularFamily]

Get all MolecularFamily objects.

mibig_bgcs property

    mibig_bgcs: list[BGC]

Get all MiBIG BGC objects.

strains property

    strains: StrainCollection

Get all Strain objects.

product_types property

    product_types: list[str]

Get all BiGSCAPE product types.

chem_classes property

    chem_classes

Returns loaded ChemClassPredictions with the class predictions.

class_matches property

    class_matches

ClassMatches with the matched classes and scoring tables from MIBiG.

scoring_methods property

    scoring_methods: list[str]

Get names of all valid scoring methods.

load_data

load_data()

Load all data from local files into memory.

This method is a convenience function that calls the DatasetArranger and DatasetLoader classes to load all data from the local filesystem into memory. The loaded data is then stored in various private data containers for easy access.

Source code in src/nplinker/nplinker.py
def load_data(self):
    """Load all data from local files into memory.

    This method is a convenience function that calls the `DatasetArranger` and `DatasetLoader`
    classes to load all data from the local filesystem into memory. The loaded data is then
    stored in various private data containers for easy access.
    """
    arranger = DatasetArranger(self.config)
    arranger.arrange()
    loader = DatasetLoader(self.config)
    loader.load()

    self._bgc_dict = {bgc.id: bgc for bgc in loader.bgcs}
    self._gcf_dict = {gcf.id: gcf for gcf in loader.gcfs}
    self._spec_dict = {spec.id: spec for spec in loader.spectra}
    self._mf_dict = {mf.id: mf for mf in loader.mfs}

    self._mibig_bgcs = loader.mibig_bgcs
    self._strains = loader.strains
    self._product_types = loader.product_types
    self._chem_classes = loader.chem_classes
    self._class_matches = loader.class_matches
get_links

get_links(
    objects: (
        Sequence[BGC]
        | Sequence[GCF]
        | Sequence[Spectrum]
        | Sequence[MolecularFamily]
    ),
    scoring_method: str,
    **scoring_params: Any
) -> LinkGraph

Get the links for the given objects using the specified scoring method and parameters.

Parameters:

    objects (Sequence[BGC] | Sequence[GCF] | Sequence[Spectrum] | Sequence[MolecularFamily]): A sequence of objects to get the links for. The objects must be of the same type, i.e. BGC, GCF, Spectrum or MolecularFamily type. For the scoring method metcalf, BGC objects are not supported. Required.
    scoring_method (str): The scoring method to use. Must be one of the valid scoring methods in self.scoring_methods, such as "metcalf". Required.
    scoring_params (Any): Parameters to pass to the scoring method. If not provided, the default parameters for the scoring method will be used.

Returns:

    LinkGraph: A LinkGraph object containing the links for the given objects.

Raises:

    ValueError: If input objects are empty or if the scoring method is invalid.
    TypeError: If the input objects are not of the same type or if the object type is invalid.

Source code in src/nplinker/nplinker.py
def get_links(
    self,
    objects: Sequence[BGC] | Sequence[GCF] | Sequence[Spectrum] | Sequence[MolecularFamily],
    scoring_method: str,
    **scoring_params: Any,
) -> LinkGraph:
    """Get the links for the given objects using the specified scoring method and parameters.

    Args:
        objects: A sequence of objects to get the links for. The objects must be of the same
            type, i.e. `BGC`, `GCF`, `Spectrum` or `MolecularFamily` type.
            For scoring method `metcalf`, the BGC objects are not supported.
        scoring_method: The scoring method to use. Must be one of the valid scoring methods
            `self.scoring_methods`, such as "metcalf".
        scoring_params: Parameters to pass to the scoring method. If not provided, the default
            parameters for the scoring method will be used.

    Returns:
        A LinkGraph object containing the links for the given objects.

    Raises:
        ValueError: If input objects are empty or if the scoring method is invalid.
        TypeError: If the input objects are not of the same type or if the object type is invalid.
    """
    # Validate objects
    if len(objects) == 0:
        raise ValueError("No objects provided to get links for")
    # check if all objects are of the same type
    types = {type(i) for i in objects}
    if len(types) > 1:
        raise TypeError("Input objects must be of the same type.")
    # check if the object type is valid
    obj_type = next(iter(types))
    if obj_type not in (BGC, GCF, Spectrum, MolecularFamily):
        raise TypeError(
            f"Invalid type {obj_type}. Input objects must be BGC, GCF, Spectrum or MolecularFamily objects."
        )

    # Validate scoring method
    if scoring_method not in self._valid_scoring_methods:
        raise ValueError(f"Invalid scoring method {scoring_method}.")

    # Check if the scoring method has been set up
    if not self._scoring_methods_setup_done[scoring_method]:
        self._valid_scoring_methods[scoring_method].setup(self)
        self._scoring_methods_setup_done[scoring_method] = True

    # Initialise the scoring method
    scoring = self._valid_scoring_methods[scoring_method]()

    return scoring.get_links(*objects, **scoring_params)
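As a usage sketch, scoring parameters are passed as keyword arguments; cutoff and standardised are the parameters documented for the Metcalf method further below:

>>> lg = npl.get_links(npl.gcfs, "metcalf", cutoff=0, standardised=False)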
lookup_bgc

lookup_bgc(id: str) -> BGC | None

Get the BGC object with the given ID.

Parameters:

    id (str): the ID of the BGC to look up. Required.

Returns:

    BGC | None: The BGC object with the given ID, or None if no such object exists.

Source code in src/nplinker/nplinker.py
def lookup_bgc(self, id: str) -> BGC | None:
    """Get the BGC object with the given ID.

    Args:
        id: the ID of the BGC to look up.

    Returns:
        The BGC object with the given ID, or None if no such object exists.
    """
    return self._bgc_dict.get(id, None)
lookup_gcf

lookup_gcf(id: str) -> GCF | None

Get the GCF object with the given ID.

Parameters:

    id (str): the ID of the GCF to look up. Required.

Returns:

    GCF | None: The GCF object with the given ID, or None if no such object exists.

Source code in src/nplinker/nplinker.py
def lookup_gcf(self, id: str) -> GCF | None:
    """Get the GCF object with the given ID.

    Args:
        id: the ID of the GCF to look up.

    Returns:
        The GCF object with the given ID, or None if no such object exists.
    """
    return self._gcf_dict.get(id, None)
lookup_spectrum

lookup_spectrum(id: str) -> Spectrum | None

Get the Spectrum object with the given ID.

Parameters:

    id (str): the ID of the Spectrum to look up. Required.

Returns:

    Spectrum | None: The Spectrum object with the given ID, or None if no such object exists.

Source code in src/nplinker/nplinker.py
def lookup_spectrum(self, id: str) -> Spectrum | None:
    """Get the Spectrum object with the given ID.

    Args:
        id: the ID of the Spectrum to look up.

    Returns:
        The Spectrum object with the given ID, or None if no such object exists.
    """
    return self._spec_dict.get(id, None)
lookup_mf

lookup_mf(id: str) -> MolecularFamily | None

Get the MolecularFamily object with the given ID.

Parameters:

    id (str): the ID of the MolecularFamily to look up. Required.

Returns:

    MolecularFamily | None: The MolecularFamily object with the given ID, or None if no such object exists.

Source code in src/nplinker/nplinker.py
def lookup_mf(self, id: str) -> MolecularFamily | None:
    """Get the MolecularFamily object with the given ID.

    Args:
        id: the ID of the MolecularFamily to look up.

    Returns:
        The MolecularFamily object with the given ID, or None if no such object exists.
    """
    return self._mf_dict.get(id, None)
save_data

save_data(
    file: str | PathLike, links: LinkGraph | None = None
) -> None

Pickle data to a file.

The data to be pickled is a tuple containing the BGCs, GCFs, Spectra, MolecularFamilies, StrainCollection and links, i.e. (bgcs, gcfs, spectra, mfs, strains, links). If the links are not provided, None will be used.
Parameters:

    file (str | PathLike): The path to the pickle file to save the data to. Required.
    links (LinkGraph | None): The LinkGraph object to save. Defaults to None.

Source code in src/nplinker/nplinker.py
def save_data(
    self,
    file: str | PathLike,
    links: LinkGraph | None = None,
) -> None:
    """Pickle data to a file.

    The data to be pickled is a tuple containing the BGCs, GCFs, Spectra, MolecularFamilies,
    StrainCollection and links, i.e. `(bgcs, gcfs, spectra, mfs, strains, links)`. If the links
    are not provided, `None` will be used.

    Args:
        file: The path to the pickle file to save the data to.
        links: The LinkGraph object to save.
    """
    data = (self.bgcs, self.gcfs, self.spectra, self.mfs, self.strains, links)
    with open(file, "wb") as f:
        pickle.dump(data, f)
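Because the payload is a plain tuple, it can be read back with pickle directly; a minimal round-trip sketch (the file name is illustrative):

>>> npl.save_data("npl_data.pkl", links=lg)
>>> import pickle
>>> with open("npl_data.pkl", "rb") as f:
...     bgcs, gcfs, spectra, mfs, strains, links = pickle.load(f)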

setup_logging

setup_logging(
    level: str = "INFO",
    file: str = "",
    use_console: bool = True,
) -> None

Setup logging configuration for the ancestor logger "nplinker".

Parameters:

    level (str): The log level; use the logging module's log level constants. Valid levels are: "NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL". Defaults to 'INFO'.
    file (str): The file to write the log to. If the file does not exist, it will be created. The log will be written to the file in append mode. If the file is an empty string (the default), the log will not be written to a file. Defaults to ''.
    use_console (bool): Whether to log to the console. Defaults to True.

Source code in src/nplinker/logger.py
def setup_logging(level: str = "INFO", file: str = "", use_console: bool = True) -> None:
    """Setup logging configuration for the ancestor logger "nplinker".

    Args:
        level: The log level, use the logging module's log level constants. Valid levels are:
            "NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL".
        file: The file to write the log to. If the file does not exist, it will be created. The log
            will be written to the file in append mode. If the file is an empty string (by default),
            the log will not be written to a file.
        use_console: Whether to log to the console.
    """
    # Get the ancestor logger "nplinker"
    logger = logging.getLogger("nplinker")
    logger.setLevel(level)

    # File handler
    if file:
        logger.addHandler(
            RichHandler(
                console=Console(file=open(file, "a"), width=120),  # force the line width to 120
                omit_repeated_times=False,
                rich_tracebacks=True,
                tracebacks_show_locals=True,
                log_time_format="[%Y-%m-%d %X]",
            )
        )

    # Console handler
    if use_console:
        logger.addHandler(
            RichHandler(
                omit_repeated_times=False,
                rich_tracebacks=True,
                tracebacks_show_locals=True,
                log_time_format="[%Y-%m-%d %X]",
            )
        )
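A usage sketch, assuming setup_logging is importable from the top-level nplinker package as documented on this page (the log file name is illustrative):

>>> from nplinker import setup_logging
>>> setup_logging(level="DEBUG", file="nplinker.log", use_console=True)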

defaults

NPLINKER_APP_DATA_DIR module-attribute

    NPLINKER_APP_DATA_DIR: Final = parent / 'data'

STRAIN_MAPPINGS_FILENAME module-attribute

    STRAIN_MAPPINGS_FILENAME: Final = 'strain_mappings.json'

GENOME_BGC_MAPPINGS_FILENAME module-attribute

    GENOME_BGC_MAPPINGS_FILENAME: Final = 'genome_bgc_mappings.json'

GENOME_STATUS_FILENAME module-attribute

    GENOME_STATUS_FILENAME: Final = 'genome_status.json'

GNPS_SPECTRA_FILENAME module-attribute

    GNPS_SPECTRA_FILENAME: Final = 'spectra.mgf'

GNPS_MOLECULAR_FAMILY_FILENAME module-attribute

    GNPS_MOLECULAR_FAMILY_FILENAME: Final = 'molecular_families.tsv'

GNPS_ANNOTATIONS_FILENAME module-attribute

    GNPS_ANNOTATIONS_FILENAME: Final = 'annotations.tsv'

GNPS_FILE_MAPPINGS_TSV module-attribute

    GNPS_FILE_MAPPINGS_TSV: Final = 'file_mappings.tsv'

GNPS_FILE_MAPPINGS_CSV module-attribute

    GNPS_FILE_MAPPINGS_CSV: Final = 'file_mappings.csv'

STRAINS_SELECTED_FILENAME module-attribute

    STRAINS_SELECTED_FILENAME: Final = 'strains_selected.json'

DOWNLOADS_DIRNAME module-attribute

    DOWNLOADS_DIRNAME: Final = 'downloads'

MIBIG_DIRNAME module-attribute

    MIBIG_DIRNAME: Final = 'mibig'

GNPS_DIRNAME module-attribute

    GNPS_DIRNAME: Final = 'gnps'

ANTISMASH_DIRNAME module-attribute

    ANTISMASH_DIRNAME: Final = 'antismash'

BIGSCAPE_DIRNAME module-attribute

    BIGSCAPE_DIRNAME: Final = 'bigscape'

BIGSCAPE_RUNNING_OUTPUT_DIRNAME module-attribute

    BIGSCAPE_RUNNING_OUTPUT_DIRNAME: Final = 'bigscape_running_output'

OUTPUT_DIRNAME module-attribute

    OUTPUT_DIRNAME: Final = 'output'

config

CONFIG_VALIDATORS module-attribute

CONFIG_VALIDATORS = [
    Validator(
        "root_dir",
        required=True,
        cast=transform_to_full_path,
        condition=lambda v: v.is_dir(),
    ),
    Validator(
        "mode",
        required=True,
        cast=lambda v: v.lower(),
        is_in=["local", "podp"],
    ),
    Validator(
        "podp_id",
        required=True,
        when=Validator("mode", eq="podp"),
    ),
    Validator(
        "podp_id",
        required=False,
        when=Validator("mode", eq="local"),
    ),
    Validator(
        "log.level",
        is_type_of=str,
        cast=lambda v: v.upper(),
        is_in=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    ),
    Validator("log.file", is_type_of=str),
    Validator("log.use_console", is_type_of=bool),
    Validator("mibig.to_use", required=True, is_type_of=bool),
    Validator(
        "mibig.version",
        required=True,
        is_type_of=str,
        when=Validator("mibig.to_use", eq=True),
    ),
    Validator("bigscape.parameters", required=True, is_type_of=str),
    Validator("bigscape.cutoff", required=True, is_type_of=str),
    Validator(
        "scoring.methods",
        required=True,
        cast=lambda v: [i.lower() for i in v],
        is_type_of=list,
        len_min=1,
        condition=lambda v: set(v).issubset({"metcalf", "rosetta"}),
    ),
]

load_config

load_config(config_file: str | PathLike) -> Dynaconf

Load and validate the configuration file.

Parameters:

    config_file (str | PathLike): Path to the configuration file. Required.

Returns:

    Dynaconf: A Dynaconf object containing the configuration settings.

Raises:

    FileNotFoundError: If the configuration file does not exist.

Source code in src/nplinker/config.py
def load_config(config_file: str | PathLike) -> Dynaconf:
    """Load and validate the configuration file.

    Args:
        config_file: Path to the configuration file.

    Returns:
        Dynaconf: A Dynaconf object containing the configuration settings.

    Raises:
        FileNotFoundError: If the configuration file does not exist.
    """
    config_file = transform_to_full_path(config_file)
    if not config_file.exists():
        raise FileNotFoundError(f"Config file '{config_file}' not found")

    # Locate the default config file
    default_config_file = Path(__file__).resolve().parent / "nplinker_default.toml"

    # Load config files
    config = Dynaconf(settings_files=[config_file], preload=[default_config_file])

    # Validate configs
    config.validators.register(*CONFIG_VALIDATORS)
    config.validators.validate()

    return config
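A usage sketch (the path and the returned value are illustrative); the returned Dynaconf object exposes the validated settings, e.g. the mode field checked by CONFIG_VALIDATORS above:

>>> config = load_config("path/to/nplinker.toml")
>>> config.mode
'local'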
\ No newline at end of file
diff --git a/2.0.0a3/api/schema/index.html b/2.0.0a3/api/schema/index.html
new file mode 100644
index 00000000..058057d0
--- /dev/null
+++ b/2.0.0a3/api/schema/index.html
@@ -0,0 +1,1924 @@

Schemas

schemas

PODP_ADAPTED_SCHEMA module-attribute

    PODP_ADAPTED_SCHEMA = load(f)

SCHEMA_DIR module-attribute

    SCHEMA_DIR = parent

GENOME_STATUS_SCHEMA module-attribute

    GENOME_STATUS_SCHEMA = load(f)

GENOME_BGC_MAPPINGS_SCHEMA module-attribute

    GENOME_BGC_MAPPINGS_SCHEMA = load(f)

STRAIN_MAPPINGS_SCHEMA module-attribute

    STRAIN_MAPPINGS_SCHEMA = load(f)

USER_STRAINS_SCHEMA module-attribute

    USER_STRAINS_SCHEMA = load(f)

validate_podp_json

validate_podp_json(json_data: dict) -> None

Validate a dictionary of JSON data against the PODP JSON schema.

All validation error messages are collected and raised as a single ValueError.
Parameters:

    json_data (dict): The JSON data to validate. Required.

Raises:

    ValueError: If the JSON data does not match the schema.

Source code in src/nplinker/schemas/utils.py
def validate_podp_json(json_data: dict) -> None:
    """Validate a dictionary of JSON data against the PODP JSON schema.

    All validation error messages are collected and raised as a single
    ValueError.

    Args:
        json_data: The JSON data to validate.

    Raises:
        ValueError: If the JSON data does not match the schema.
    """
    validator = Draft7Validator(PODP_ADAPTED_SCHEMA)
    errors = sorted(validator.iter_errors(json_data), key=lambda e: e.path)
    if errors:
        error_messages = [f"{e.json_path}: {e.message}" for e in errors]
        raise ValueError(
            "Not match PODP adapted schema, here are the detailed error:\n  - "
            + "\n  - ".join(error_messages)
        )
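A usage sketch; the import path is assumed from the source location shown above, and the file name is illustrative:

>>> import json
>>> from nplinker.schemas import validate_podp_json
>>> with open("podp_project.json") as f:
...     data = json.load(f)
>>> validate_podp_json(data)  # raises ValueError listing all messages if the data is invalid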
\ No newline at end of file
diff --git a/2.0.0a3/api/scoring/index.html b/2.0.0a3/api/scoring/index.html
new file mode 100644
index 00000000..559e1786
--- /dev/null
+++ b/2.0.0a3/api/scoring/index.html
@@ -0,0 +1,2406 @@

Data Models

scoring

LinkGraph

LinkGraph()

A class to represent the links between objects in NPLinker.

This class wraps the networkx.Graph class to provide a more user-friendly interface for working with the links.

The links between objects are stored as edges in a graph, while the objects themselves are stored as nodes.

The scoring data for each link (or link data) is stored as the key/value attributes of the edge.
Examples:

Create a LinkGraph object:

>>> lg = LinkGraph()

Add a link between a GCF and a Spectrum object:

>>> lg.add_link(gcf, spectrum, metcalf=Score("metcalf", 1.0, {"cutoff": 0.5}))

Get all links for a given object:

>>> lg[gcf]
{spectrum: {"metcalf": Score("metcalf", 1.0, {"cutoff": 0.5})}}

Get all links:

>>> lg.links
[(gcf, spectrum, {"metcalf": Score("metcalf", 1.0, {"cutoff": 0.5})})]

Check if there is a link between two objects:

>>> lg.has_link(gcf, spectrum)
True

Get the link data between two objects:

>>> lg.get_link_data(gcf, spectrum)
{"metcalf": Score("metcalf", 1.0, {"cutoff": 0.5})}

Source code in src/nplinker/scoring/link_graph.py
def __init__(self) -> None:
    self._g: Graph = Graph()
links property

    links: list[LINK]

Get all links.

Returns:

    list[LINK]: A list of tuples containing the links between objects.

add_link
add_link(u: Entity, v: Entity, **data: Score) -> None

Add a link between two objects.

The objects u and v must be different types, i.e. one must be a GCF and the other must be a Spectrum or MolecularFamily.

Parameters:

    u (Entity): the first object, either a GCF, Spectrum, or MolecularFamily. Required.
    v (Entity): the second object, either a GCF, Spectrum, or MolecularFamily. Required.
    data (Score): keyword arguments. At least one scoring method and its data must be provided. The key must be the name of the scoring method defined in ScoringMethod, and the value is a Score object, e.g. metcalf=Score("metcalf", 1.0, {"cutoff": 0.5}).

Source code in src/nplinker/scoring/link_graph.py
@validate_uv
def add_link(
    self,
    u: Entity,
    v: Entity,
    **data: Score,
) -> None:
    """Add a link between two objects.

    The objects `u` and `v` must be different types, i.e. one must be a GCF and the other must be
    a Spectrum or MolecularFamily.

    Args:
        u: the first object, either a GCF, Spectrum, or MolecularFamily
        v: the second object, either a GCF, Spectrum, or MolecularFamily
        data: keyword arguments. At least one scoring method and its data must be provided.
            The key must be the name of the scoring method defined in `ScoringMethod`, and the
            value is a `Score` object, e.g. `metcalf=Score("metcalf", 1.0, {"cutoff": 0.5})`.
    """
    # validate the data
    if not data:
        raise ValueError("At least one scoring method and its data must be provided.")
    for key, value in data.items():
        if not ScoringMethod.has_value(key):
            raise ValueError(
                f"{key} is not a valid name of scoring method. See `ScoringMethod` for valid names."
            )
        if not isinstance(value, Score):
            raise TypeError(f"{value} is not a Score object.")

    self._g.add_edge(u, v, **data)
has_link
has_link(u: Entity, v: Entity) -> bool

Check if there is a link between two objects.

Parameters:

    u (Entity): the first object, either a GCF, Spectrum, or MolecularFamily. Required.
    v (Entity): the second object, either a GCF, Spectrum, or MolecularFamily. Required.

Returns:

    bool: True if there is a link between the two objects, False otherwise.

Source code in src/nplinker/scoring/link_graph.py
@validate_uv
def has_link(self, u: Entity, v: Entity) -> bool:
    """Check if there is a link between two objects.

    Args:
        u: the first object, either a GCF, Spectrum, or MolecularFamily
        v: the second object, either a GCF, Spectrum, or MolecularFamily

    Returns:
        True if there is a link between the two objects, False otherwise
    """
    return self._g.has_edge(u, v)
get_link_data
get_link_data(u: Entity, v: Entity) -> LINK_DATA | None

Get the data for a link between two objects.

Parameters:

    u (Entity): the first object, either a GCF, Spectrum, or MolecularFamily. Required.
    v (Entity): the second object, either a GCF, Spectrum, or MolecularFamily. Required.

Returns:

    LINK_DATA | None: A dictionary of scoring methods and their data for the link between the two objects, or None if there is no link between the two objects.

Source code in src/nplinker/scoring/link_graph.py
@validate_uv
def get_link_data(
    self,
    u: Entity,
    v: Entity,
) -> LINK_DATA | None:
    """Get the data for a link between two objects.

    Args:
        u: the first object, either a GCF, Spectrum, or MolecularFamily
        v: the second object, either a GCF, Spectrum, or MolecularFamily

    Returns:
        A dictionary of scoring methods and their data for the link between the two objects, or
        None if there is no link between the two objects.
    """
    return self._g.get_edge_data(u, v)  # type: ignore
Score dataclass

Score(name: str, value: float, parameter: dict)

A data class to represent score data.

Attributes:

    name (str): the name of the scoring method. See ScoringMethod for valid values.
    value (float): the score value.
    parameter (dict): the parameters used for the scoring method.

name instance-attribute

    name: str

value instance-attribute

    value: float

parameter instance-attribute

    parameter: dict
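Since Score is a plain dataclass, constructing and inspecting one is straightforward; the values mirror the LinkGraph examples above:

>>> s = Score("metcalf", 1.0, {"cutoff": 0.5})
>>> (s.name, s.value)
('metcalf', 1.0)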
\ No newline at end of file
diff --git a/2.0.0a3/api/scoring_abc/index.html b/2.0.0a3/api/scoring_abc/index.html
new file mode 100644
index 00000000..4933f344
--- /dev/null
+++ b/2.0.0a3/api/scoring_abc/index.html
@@ -0,0 +1,2073 @@

Abstract Base Classes

abc

logger module-attribute

    logger = getLogger(__name__)

ScoringBase

Bases: ABC

Abstract base class of scoring methods.

Attributes:

    name (str): The name of the scoring method.
    npl (NPLinker | None): The NPLinker object.

name class-attribute instance-attribute

    name: str = 'ScoringBase'

npl class-attribute instance-attribute

    npl: NPLinker | None = None

setup abstractmethod classmethod

setup(npl: NPLinker)

Setup class level attributes.

Source code in src/nplinker/scoring/abc.py
@classmethod
@abstractmethod
def setup(cls, npl: NPLinker):
    """Setup class level attributes."""

get_links
get_links(*objects, **parameters) -> LinkGraph

Get links information for the given objects.

Parameters:

    objects: A list of objects to get links for.
    parameters: The parameters used for scoring.

Returns:

    LinkGraph: The LinkGraph object.

Source code in src/nplinker/scoring/abc.py
@abstractmethod
def get_links(
    self,
    *objects,
    **parameters,
) -> LinkGraph:
    """Get links information for the given objects.

    Args:
        objects: A list of objects to get links for.
        parameters: The parameters used for scoring.

    Returns:
        The LinkGraph object.
    """

format_data abstractmethod

format_data(data) -> str

Format the scoring data to a string.

Source code in src/nplinker/scoring/abc.py
@abstractmethod
def format_data(self, data) -> str:
    """Format the scoring data to a string."""

sort abstractmethod

sort(objects, reverse=True) -> list

Sort the given objects based on the scoring data.

Source code in src/nplinker/scoring/abc.py
@abstractmethod
def sort(self, objects, reverse=True) -> list:
    """Sort the given objects based on the scoring data."""
\ No newline at end of file
diff --git a/2.0.0a3/api/scoring_methods/index.html b/2.0.0a3/api/scoring_methods/index.html
new file mode 100644
index 00000000..907e3505
--- /dev/null
+++ b/2.0.0a3/api/scoring_methods/index.html
@@ -0,0 +1,2974 @@

Scoring Methods

scoring

ScoringMethod

Bases: Enum

Enum class for scoring methods.


METCALF class-attribute instance-attribute

    METCALF = 'metcalf'

ROSETTA class-attribute instance-attribute

    ROSETTA = 'rosetta'

NPLCLASS class-attribute instance-attribute

    NPLCLASS = 'nplclass'

has_value classmethod

has_value(value: str) -> bool

Check if the enum has a value.

Source code in src/nplinker/scoring/scoring_method.py
@classmethod
def has_value(cls, value: str) -> bool:
    """Check if the enum has a value."""
    return any(value == item.value for item in cls)
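A quick check against the member values listed above:

>>> ScoringMethod.has_value("metcalf")
True
>>> ScoringMethod.has_value("unknown")
False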

MetcalfScoring

Bases: ScoringBase

Metcalf scoring method.

Attributes:

    name: The name of this scoring method, set to a fixed value metcalf.
    npl (NPLinker | None): The NPLinker object.
    CACHE (str): The name of the cache file to use for storing the MetcalfScoring.
    presence_gcf_strain (DataFrame): A DataFrame to store presence of gcfs with respect to strains. The index of the DataFrame are the GCF objects and the columns are Strain objects. The values are 1 where the gcf occurs in the strain, 0 otherwise.
    presence_spec_strain (DataFrame): A DataFrame to store presence of spectra with respect to strains. The index of the DataFrame are the Spectrum objects and the columns are Strain objects. The values are 1 where the spectrum occurs in the strain, 0 otherwise.
    presence_mf_strain (DataFrame): A DataFrame to store presence of molecular families with respect to strains. The index of the DataFrame are the MolecularFamily objects and the columns are Strain objects. The values are 1 where the molecular family occurs in the strain, 0 otherwise.
    raw_score_spec_gcf (DataFrame): A DataFrame to store the raw Metcalf scores for spectrum-gcf links. The columns are "spec", "gcf" and "score":

        - The "spec" and "gcf" columns contain the Spectrum and GCF objects respectively.
        - The "score" column contains the raw Metcalf scores.

    raw_score_mf_gcf (DataFrame): A DataFrame to store the raw Metcalf scores for molecular family-gcf links. The columns are "mf", "gcf" and "score":

        - The "mf" and "gcf" columns contain the MolecularFamily and GCF objects respectively.
        - The "score" column contains the raw Metcalf scores.

    metcalf_mean (ndarray | None): A numpy array to store the mean value used for standardising Metcalf scores. The array has shape (n_strains+1, n_strains+1), where n_strains is the number of strains.
    metcalf_std (ndarray | None): A numpy array to store the standard deviation value used for standardising Metcalf scores. The array has shape (n_strains+1, n_strains+1), where n_strains is the number of strains.

name class-attribute instance-attribute

    name = METCALF.value

npl class-attribute instance-attribute

    npl: NPLinker | None = None

CACHE class-attribute instance-attribute

    CACHE: str = 'cache_metcalf_scoring.pckl'

metcalf_weights class-attribute instance-attribute

    metcalf_weights: tuple[int, int, int, int] = (10, -10, 0, 1)

presence_gcf_strain class-attribute instance-attribute

    presence_gcf_strain: DataFrame = DataFrame()

presence_spec_strain class-attribute instance-attribute

    presence_spec_strain: DataFrame = DataFrame()

presence_mf_strain class-attribute instance-attribute

    presence_mf_strain: DataFrame = DataFrame()

raw_score_spec_gcf class-attribute instance-attribute

    raw_score_spec_gcf: DataFrame = DataFrame(columns=["spec", "gcf", "score"])

raw_score_mf_gcf class-attribute instance-attribute

    raw_score_mf_gcf: DataFrame = DataFrame(columns=["mf", "gcf", "score"])

metcalf_mean class-attribute instance-attribute

    metcalf_mean: ndarray | None = None

metcalf_std class-attribute instance-attribute

    metcalf_std: ndarray | None = None

setup classmethod

setup(npl: NPLinker)

Setup the MetcalfScoring object.

This method is only called once to setup the MetcalfScoring object.

Parameters:

    npl (NPLinker): The NPLinker object. Required.

Source code in src/nplinker/scoring/metcalf_scoring.py
@classmethod
def setup(cls, npl: NPLinker):
    """Setup the MetcalfScoring object.

    This method is only called once to setup the MetcalfScoring object.

    Args:
        npl: The NPLinker object.
    """
    if cls.npl is not None:
        logger.info("MetcalfScoring.setup already called, skipping.")
        return

    logger.info(
        f"MetcalfScoring.setup starts: #bgcs={len(npl.bgcs)}, #gcfs={len(npl.gcfs)}, "
        f"#spectra={len(npl.spectra)}, #mfs={len(npl.mfs)}, #strains={npl.strains}"
    )
    cls.npl = npl

    # calculate presence of gcfs/spectra/mfs with respect to strains
    cls.presence_gcf_strain = get_presence_gcf_strain(npl.gcfs, npl.strains)
    cls.presence_spec_strain = get_presence_spec_strain(npl.spectra, npl.strains)
    cls.presence_mf_strain = get_presence_mf_strain(npl.mfs, npl.strains)

    # calculate raw Metcalf scores for spec-gcf links
    raw_score_spec_gcf = cls._calc_raw_score(
        cls.presence_spec_strain, cls.presence_gcf_strain, cls.metcalf_weights
    )
    cls.raw_score_spec_gcf = raw_score_spec_gcf.reset_index().melt(id_vars="index")
    cls.raw_score_spec_gcf.columns = ["spec", "gcf", "score"]  # type: ignore

    # calculate raw Metcalf scores for mf-gcf links
    raw_score_mf_gcf = cls._calc_raw_score(
        cls.presence_mf_strain, cls.presence_gcf_strain, cls.metcalf_weights
    )
    cls.raw_score_mf_gcf = raw_score_mf_gcf.reset_index().melt(id_vars="index")
    cls.raw_score_mf_gcf.columns = ["mf", "gcf", "score"]  # type: ignore

    # calculate mean and std for standardising Metcalf scores
    cls.metcalf_mean, cls.metcalf_std = cls._calc_mean_std(
        len(npl.strains), cls.metcalf_weights
    )

    logger.info("MetcalfScoring.setup completed")
get_links

get_links(*objects, **parameters)

Get links for the given objects.

Parameters:

    objects: The objects to get links for. All objects must be of the same type, i.e. GCF, Spectrum or MolecularFamily type. If no objects are provided, all detected objects (npl.gcfs) will be used.
    parameters: The scoring parameters to use for the links. The parameters are:

        - cutoff: The minimum score to consider a link (≥cutoff). Default is 0.
        - standardised: Whether to use standardised scores. Default is False.

Returns:

    The LinkGraph object containing the links involving the input objects with the Metcalf scores.

Raises:

    TypeError: If the input objects are not of the same type or the object type is invalid.

Source code in src/nplinker/scoring/metcalf_scoring.py
def get_links(self, *objects, **parameters):
+    """Get links for the given objects.
+
+    Args:
+        objects: The objects to get links for. All objects must be of the same type, i.e. `GCF`,
+            `Spectrum` or `MolecularFamily` type.
+            If no objects are provided, all detected objects (`npl.gcfs`) will be used.
+        parameters: The scoring parameters to use for the links. The parameters are:
+
+                - cutoff: The minimum score to consider a link (≥cutoff). Default is 0.
+                - standardised: Whether to use standardised scores. Default is False.
+
+    Returns:
+        The `LinkGraph` object containing the links involving the input objects with the Metcalf
+            scores.
+
+    Raises:
+        TypeError: If the input objects are not of the same type or the object type is invalid.
+    """
+    # validate input objects
+    if len(objects) == 0:
+        objects = self.npl.gcfs
+    # check if all objects are of the same type
+    types = {type(i) for i in objects}
+    if len(types) > 1:
+        raise TypeError("Input objects must be of the same type.")
+    # check if the object type is valid
+    obj_type = next(iter(types))
+    if obj_type not in (GCF, Spectrum, MolecularFamily):
+        raise TypeError(
+            f"Invalid type {obj_type}. Input objects must be GCF, Spectrum or MolecularFamily objects."
+        )
+
+    # validate scoring parameters
+    self._cutoff: float = parameters.get("cutoff", 0)
+    self._standardised: bool = parameters.get("standardised", False)
+    parameters.update({"cutoff": self._cutoff, "standardised": self._standardised})
+
+    logger.info(
+        f"MetcalfScoring: #objects={len(objects)}, type={obj_type}, cutoff={self._cutoff}, "
+        f"standardised={self._standardised}"
+    )
+    if not self._standardised:
+        scores_list = self._get_links(*objects, obj_type=obj_type, score_cutoff=self._cutoff)
+    else:
+        if self.metcalf_mean is None or self.metcalf_std is None:
+            raise ValueError(
+                "MetcalfScoring.metcalf_mean and metcalf_std are not set. Run MetcalfScoring.setup first."
+            )
+        # use negative infinity as the score cutoff to ensure we get all links
+        scores_list = self._get_links(*objects, obj_type=obj_type, score_cutoff=-np.inf)
+        scores_list = self._calc_standardised_score(scores_list)
+
+    links = LinkGraph()
+    for score_df in scores_list:
+        for row in score_df.itertuples(index=False):  # row has attributes: spec/mf, gcf, score
+            met = row.spec if score_df.name == LinkType.SPEC_GCF else row.mf
+            links.add_link(
+                row.gcf,
+                met,
+                metcalf=Score(self.name, row.score, parameters),
+            )
+
+    logger.info(f"MetcalfScoring: completed! Found {len(links.links)} links in total.")
+    return links
+
+
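For example (a sketch continuing the setup snippet above; it assumes MetcalfScoring can be instantiated without constructor arguments, which is not shown on this page):
mc = MetcalfScoring()
+# no objects passed: defaults to all GCFs; keep raw scores >= 2.0
+lg = mc.get_links(cutoff=2.0)
+# links for a single spectrum, using standardised scores
+lg_spec = mc.get_links(npl.spectra[0], standardised=True)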
+
+ +
+ +
+ + +

+ format_data + + +

+
format_data(data)
+
+ +
+ +

Format the data for display.

+ +
+ Source code in src/nplinker/scoring/metcalf_scoring.py +
def format_data(self, data):
+    """Format the data for display."""
+    # for metcalf the data will just be a floating point value (i.e. the score)
+    return f"{data:.4f}"
+
+
+
+ +
+ +
+ + +

+ sort + + +

+
sort(objects, reverse=True)
+
+ +
+ +

Sort the objects based on the score.

+ +
+ Source code in src/nplinker/scoring/metcalf_scoring.py +
def sort(self, objects, reverse=True):
+    """Sort the objects based on the score."""
+    # sort based on score
+    return sorted(objects, key=lambda objlink: objlink[self], reverse=reverse)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/2.0.0a3/api/scoring_utils/index.html b/2.0.0a3/api/scoring_utils/index.html new file mode 100644 index 00000000..75d41e1b --- /dev/null +++ b/2.0.0a3/api/scoring_utils/index.html @@ -0,0 +1,1801 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Utilities - NPLinker + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Utilities

+ +
+ + + +

+ utils + + +

+ +
+ + + +
+ + + + + + + + + +
+ + +

+ get_presence_gcf_strain + + +

+
get_presence_gcf_strain(
+    gcfs: Sequence[GCF], strains: StrainCollection
+) -> DataFrame
+
+ +
+ +

Get the occurrence of strains in gcfs.

+

The occurrence is a DataFrame with GCF objects as index and Strain objects as columns, and the +values are 1 if the gcf occurs in the strain, 0 otherwise.

+ +
+ Source code in src/nplinker/scoring/utils.py +
10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
def get_presence_gcf_strain(gcfs: Sequence[GCF], strains: StrainCollection) -> pd.DataFrame:
+    """Get the occurrence of strains in gcfs.
+
+    The occurrence is a DataFrame with GCF objects as index and Strain objects as columns, and the
+    values are 1 if the gcf occurs in the strain, 0 otherwise.
+    """
+    df_gcf_strain = pd.DataFrame(
+        0,
+        index=gcfs,
+        columns=list(strains),
+        dtype=int,
+    )  # type: ignore
+    for gcf in gcfs:
+        for strain in strains:
+            if gcf.has_strain(strain):
+                df_gcf_strain.loc[gcf, strain] = 1
+    return df_gcf_strain  # type: ignore
+
+
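A short sketch of how the presence matrix might be inspected (assuming a loaded NPLinker object npl, as elsewhere on this page; the import path follows the source file above):
from nplinker.scoring.utils import get_presence_gcf_strain
+
+presence = get_presence_gcf_strain(npl.gcfs, npl.strains)
+# rows are GCF objects, columns are Strain objects, values are 0/1
+print(presence.shape)
+print(presence.sum(axis=1))  # number of strains each GCF occurs in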
+
+ +
+ +
+ + +

+ get_presence_spec_strain + + +

+
get_presence_spec_strain(
+    spectra: Sequence[Spectrum], strains: StrainCollection
+) -> DataFrame
+
+ +
+ +

Get the occurrence of strains in spectra.

+

The occurrence is a DataFrame with Spectrum objects as index and Strain objects as columns, and +the values are 1 if the spectrum occurs in the strain, 0 otherwise.

+ +
+ Source code in src/nplinker/scoring/utils.py +
29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
def get_presence_spec_strain(
+    spectra: Sequence[Spectrum], strains: StrainCollection
+) -> pd.DataFrame:
+    """Get the occurrence of strains in spectra.
+
+    The occurrence is a DataFrame with Spectrum objects as index and Strain objects as columns, and
+    the values are 1 if the spectrum occurs in the strain, 0 otherwise.
+    """
+    df_spec_strain = pd.DataFrame(
+        0,
+        index=spectra,
+        columns=list(strains),
+        dtype=int,
+    )  # type: ignore
+    for spectrum in spectra:
+        for strain in strains:
+            if spectrum.has_strain(strain):
+                df_spec_strain.loc[spectrum, strain] = 1
+    return df_spec_strain  # type: ignore
+
+
+
+ +
+ +
+ + +

+ get_presence_mf_strain + + +

+
get_presence_mf_strain(
+    mfs: Sequence[MolecularFamily],
+    strains: StrainCollection,
+) -> DataFrame
+
+ +
+ +

Get the occurrence of strains in molecular families.

+

The occurrence is a DataFrame with MolecularFamily objects as index and Strain objects as +columns, and the values are 1 if the molecular family occurs in the strain, 0 otherwise.

+ +
+ Source code in src/nplinker/scoring/utils.py +
50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
def get_presence_mf_strain(
+    mfs: Sequence[MolecularFamily], strains: StrainCollection
+) -> pd.DataFrame:
+    """Get the occurrence of strains in molecular families.
+
+    The occurrence is a DataFrame with MolecularFamily objects as index and Strain objects as
+    columns, and the values are 1 if the molecular family occurs in the strain, 0 otherwise.
+    """
+    df_mf_strain = pd.DataFrame(
+        0,
+        index=mfs,
+        columns=list(strains),
+        dtype=int,
+    )  # type: ignore
+    for mf in mfs:
+        for strain in strains:
+            if mf.has_strain(strain):
+                df_mf_strain.loc[mf, strain] = 1
+    return df_mf_strain  # type: ignore
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/2.0.0a3/api/strain/index.html b/2.0.0a3/api/strain/index.html new file mode 100644 index 00000000..666bd0a8 --- /dev/null +++ b/2.0.0a3/api/strain/index.html @@ -0,0 +1,3039 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Data Models - NPLinker + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Data Models

+ +
+ + + +

+ strain + + +

+ +
+ + + +
+ + + + + + + + +
+ + + +

+ Strain + + +

+
Strain(id: str)
+
+ +
+ + +

To model the mapping between strain id and its aliases.

+

It's recommended to use NCBI taxonomy strain id or name as the primary +id.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
id + str + +
+

the representative id of the strain.

+
+
+ required +
+ +
+ Source code in src/nplinker/strain/strain.py +
15
+16
+17
+18
+19
+20
+21
+22
def __init__(self, id: str) -> None:
+    """To model the mapping between strain id and its aliases.
+
+    Args:
+        id: the representative id of the strain.
+    """
+    self.id: str = id
+    self._aliases: set[str] = set()
+
+
+ + + +
+ + + + + + + +
+ + + +

+ id + + + + instance-attribute + + +

+
id: str = id
+
+ +
+
+ +
+ +
+ + + +

+ names + + + + property + + +

+
names: set[str]
+
+ +
+ +

Get the set of strain names including id and aliases.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ set[str] + +
+

A set of names associated with the strain.

+
+
+
+ +
+ +
+ + + +

+ aliases + + + + property + + +

+
aliases: set[str]
+
+ +
+ +

Get the set of known aliases.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ set[str] + +
+

A set of aliases associated with the strain.

+
+
+
+ +
+ + + +
+ + +

+ add_alias + + +

+
add_alias(alias: str) -> None
+
+ +
+ +

Add an alias to the list of known aliases.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
alias + str + +
+

The alias to add to the list of known aliases.

+
+
+ required +
+ +
+ Source code in src/nplinker/strain/strain.py +
66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
def add_alias(self, alias: str) -> None:
+    """Add an alias to the list of known aliases.
+
+    Args:
+        alias: The alias to add to the list of known aliases.
+    """
+    if not isinstance(alias, str):
+        raise TypeError(f"Expected str, got {type(alias)}")
+    if len(alias) == 0:
+        logger.warning("Refusing to add an empty-string alias to strain {%s}", self)
+    else:
+        self._aliases.add(alias)
+
+
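For instance (a minimal sketch; the id and alias are made up, and the import path follows the source layout above):
from nplinker.strain import Strain
+
+strain = Strain("NCBI:123456")  # hypothetical id
+strain.add_alias("my_isolate_42")
+print(strain.aliases)  # {'my_isolate_42'}
+print(strain.names)    # {'NCBI:123456', 'my_isolate_42'}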
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ StrainCollection + + +

+
StrainCollection()
+
+ +
+ + +

A collection of Strain objects.

+ +
+ Source code in src/nplinker/strain/strain_collection.py +
17
+18
+19
+20
def __init__(self):
+    # the order of strains is needed for scoring part, so use a list
+    self._strains: list[Strain] = []
+    self._strain_dict_name: dict[str, list[Strain]] = {}
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ add + + +

+
add(strain: Strain) -> None
+
+ +
+ +

Add strain to the collection.

+

If the strain already exists, merge the aliases.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
strain + Strain + +
+

The strain to add.

+
+
+ required +
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
def add(self, strain: Strain) -> None:
+    """Add strain to the collection.
+
+    If the strain already exists, merge the aliases.
+
+    Args:
+        strain: The strain to add.
+    """
+    if strain in self._strains:
+        # only one strain object per id
+        strain_ref = self._strain_dict_name[strain.id][0]
+        new_aliases = [alias for alias in strain.aliases if alias not in strain_ref.aliases]
+        for alias in new_aliases:
+            strain_ref.add_alias(alias)
+            if alias not in self._strain_dict_name:
+                self._strain_dict_name[alias] = [strain_ref]
+            else:
+                self._strain_dict_name[alias].append(strain_ref)
+    else:
+        self._strains.append(strain)
+        for name in strain.names:
+            if name not in self._strain_dict_name:
+                self._strain_dict_name[name] = [strain]
+            else:
+                self._strain_dict_name[name].append(strain)
+
+
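A sketch of the documented merging behaviour (ids and aliases below are made up; it assumes strain equality is based on the strain id, per the "only one strain object per id" comment):
from nplinker.strain import Strain, StrainCollection
+
+sc = StrainCollection()
+a = Strain("S1")
+a.add_alias("alias1")
+b = Strain("S1")
+b.add_alias("alias2")
+sc.add(a)
+sc.add(b)  # same id, so the aliases are merged into the existing Strain
+print(sc.lookup("alias2")[0].names)  # {'S1', 'alias1', 'alias2'}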
+
+ +
+ +
+ + +

+ remove + + +

+
remove(strain: Strain)
+
+ +
+ +

Remove a strain from the collection.

+

It removes the given strain object from the collection by strain id. +If the strain is not found, a ValueError is raised.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
strain + Strain + +
+

The strain to remove.

+
+
+ required +
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ ValueError + +
+

If the strain is not found in the collection.

+
+
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
def remove(self, strain: Strain):
+    """Remove a strain from the collection.
+
+    It removes the given strain object from the collection by strain id.
+    If the strain is not found, a ValueError is raised.
+
+    Args:
+        strain: The strain to remove.
+
+    Raises:
+        ValueError: If the strain is not found in the collection.
+    """
+    if strain in self._strains:
+        self._strains.remove(strain)
+        # only one strain object per id
+        strain_ref = self._strain_dict_name[strain.id][0]
+        for name in strain_ref.names:
+            if name in self._strain_dict_name:
+                new_strain_list = [s for s in self._strain_dict_name[name] if s.id != strain.id]
+                if not new_strain_list:
+                    del self._strain_dict_name[name]
+                else:
+                    self._strain_dict_name[name] = new_strain_list
+    else:
+        raise ValueError(f"Strain {strain} not found in the strain collection.")
+
+
+
+ +
+ +
+ + +

+ filter + + +

+
filter(strain_set: set[Strain])
+
+ +
+ +

Remove all strains that are not in strain_set from the strain collection.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
strain_set + set[Strain] + +
+

Set of strains to keep.

+
+
+ required +
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
def filter(self, strain_set: set[Strain]):
+    """Remove all strains that are not in strain_set from the strain collection.
+
+    Args:
+        strain_set: Set of strains to keep.
+    """
+    # note that we need to copy the list of strains, as we are modifying it
+    for strain in self._strains.copy():
+        if strain not in strain_set:
+            self.remove(strain)
+
+
+
+ +
+ +
+ + +

+ intersection + + +

+
intersection(other: StrainCollection) -> StrainCollection
+
+ +
+ +

Get the intersection of two strain collections.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
other + StrainCollection + +
+

The other strain collection to compare.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ StrainCollection + +
+

StrainCollection object containing the strains that are in both collections.

+
+
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
def intersection(self, other: StrainCollection) -> StrainCollection:
+    """Get the intersection of two strain collections.
+
+    Args:
+        other: The other strain collection to compare.
+
+    Returns:
+        StrainCollection object containing the strains that are in both collections.
+    """
+    intersection = StrainCollection()
+    for strain in self:
+        if strain in other:
+            intersection.add(strain)
+    return intersection
+
+
+
+ +
+ +
+ + +

+ has_name + + +

+
has_name(name: str) -> bool
+
+ +
+ +

Check if the strain collection contains the given strain name (id or alias).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
name + str + +
+

Strain name (id or alias) to check.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ bool + +
+

True if the strain name is in the collection, False otherwise.

+
+
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
def has_name(self, name: str) -> bool:
+    """Check if the strain collection contains the given strain name (id or alias).
+
+    Args:
+        name: Strain name (id or alias) to check.
+
+    Returns:
+        True if the strain name is in the collection, False otherwise.
+    """
+    return name in self._strain_dict_name
+
+
+
+ +
+ +
+ + +

+ lookup + + +

+
lookup(name: str) -> list[Strain]
+
+ +
+ +

Lookup a strain by name (id or alias).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
name + str + +
+

Strain name (id or alias) to lookup.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ list[Strain] + +
+

List of Strain objects with the given name.

+
+
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ ValueError + +
+

If the strain name is not found.

+
+
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
def lookup(self, name: str) -> list[Strain]:
+    """Lookup a strain by name (id or alias).
+
+    Args:
+        name: Strain name (id or alias) to lookup.
+
+    Returns:
+        List of Strain objects with the given name.
+
+    Raises:
+        ValueError: If the strain name is not found.
+    """
+    if name in self._strain_dict_name:
+        return self._strain_dict_name[name]
+    raise ValueError(f"Strain {name} not found in the strain collection.")
+
+
+
+ +
+ +
+ + +

+ read_json + + + + staticmethod + + +

+
read_json(file: str | PathLike) -> 'StrainCollection'
+
+ +
+ +

Read a strain mappings JSON file and return a StrainCollection object.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file + str | PathLike + +
+

Path to the strain mappings JSON file.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ 'StrainCollection' + +
+

StrainCollection object.

+
+
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
@staticmethod
+def read_json(file: str | PathLike) -> "StrainCollection":
+    """Read a strain mappings JSON file and return a StrainCollection object.
+
+    Args:
+        file: Path to the strain mappings JSON file.
+
+    Returns:
+        StrainCollection object.
+    """
+    with open(file, "r") as f:
+        json_data = json.load(f)
+
+    # validate json data
+    validate(instance=json_data, schema=STRAIN_MAPPINGS_SCHEMA)
+
+    strain_collection = StrainCollection()
+    for data in json_data["strain_mappings"]:
+        strain = Strain(data["strain_id"])
+        for alias in data["strain_alias"]:
+            strain.add_alias(alias)
+        strain_collection.add(strain)
+    return strain_collection
+
+
+
+ +
+ +
+ + +

+ to_json + + +

+
to_json(file: str | PathLike | None = None) -> str | None
+
+ +
+ +

Convert the StrainCollection object to a JSON string.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file + str | PathLike | None + +
+

Path to output JSON file. If None, +return the JSON string instead.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
TypeDescription
+ str | None + +
+

If file is None, return the JSON string. Otherwise, write the JSON string to the given

+
+
+ str | None + +
+

file.

+
+
+ +
+ Source code in src/nplinker/strain/strain_collection.py +
def to_json(self, file: str | PathLike | None = None) -> str | None:
+    """Convert the StrainCollection object to a JSON string.
+
+    Args:
+        file: Path to output JSON file. If None,
+            return the JSON string instead.
+
+    Returns:
+        If `file` is None, return the JSON string. Otherwise, write the JSON string to the given
+        file.
+    """
+    data_list = [
+        {"strain_id": strain.id, "strain_alias": list(strain.aliases)} for strain in self
+    ]
+    json_data = {"strain_mappings": data_list, "version": "1.0"}
+
+    # validate json data
+    validate(instance=json_data, schema=STRAIN_MAPPINGS_SCHEMA)
+
+    if file is not None:
+        with open(file, "w") as f:
+            json.dump(json_data, f)
+        return None
+    return json.dumps(json_data)
+
+
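A round-trip sketch using to_json and read_json together, continuing the sc collection from the merging sketch above (the file name is arbitrary):
sc.to_json("strain_mappings.json")  # writes the file and returns None
+sc2 = StrainCollection.read_json("strain_mappings.json")
+print(sc.to_json())  # with no argument, the JSON string is returned instead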
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/2.0.0a3/api/strain_utils/index.html b/2.0.0a3/api/strain_utils/index.html new file mode 100644 index 00000000..f1fe8c08 --- /dev/null +++ b/2.0.0a3/api/strain_utils/index.html @@ -0,0 +1,2119 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Utilities - NPLinker + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Utilities

+ +
+ + + +

+ utils + + +

+ +
+ + + +
+ + + + + + + +
+ + + +

+ logger + + + + module-attribute + + +

+
logger = getLogger(__name__)
+
+ +
+
+ +
+ + + +
+ + +

+ load_user_strains + + +

+
load_user_strains(json_file: str | PathLike) -> set[Strain]
+
+ +
+ +

Load user specified strains from a JSON file.

+

The JSON file must follow the schema defined in schemas/user_strains.json.

+ + +
+ An example content of the JSON file +
{"strain_ids": ["strain1", "strain2"]}
+
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
json_file + str | PathLike + +
+

Path to the JSON file containing user specified strains.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ set[Strain] + +
+

A set of user specified strains.

+
+
+ +
+ Source code in src/nplinker/strain/utils.py +
21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
def load_user_strains(json_file: str | PathLike) -> set[Strain]:
+    """Load user specified strains from a JSON file.
+
+    The JSON file must follow the schema defined in `schemas/user_strains.json`.
+
+    An example content of the JSON file:
+        ```
+        {"strain_ids": ["strain1", "strain2"]}
+        ```
+
+    Args:
+        json_file: Path to the JSON file containing user specified strains.
+
+    Returns:
+        A set of user specified strains.
+    """
+    with open(json_file, "r") as f:
+        json_data = json.load(f)
+
+    # validate json data
+    validate(instance=json_data, schema=USER_STRAINS_SCHEMA)
+
+    strains = set()
+    for strain_id in json_data["strain_ids"]:
+        strains.add(Strain(strain_id))
+
+    return strains
+
+
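For example, writing the JSON content shown above to a file and loading it back (a minimal sketch; the file name is arbitrary):
from pathlib import Path
+from nplinker.strain.utils import load_user_strains
+
+Path("user_strains.json").write_text('{"strain_ids": ["strain1", "strain2"]}')
+strains = load_user_strains("user_strains.json")
+print(len(strains))  # 2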
+
+ +
+ +
+ + +

+ podp_generate_strain_mappings + + +

+
podp_generate_strain_mappings(
+    podp_project_json_file: str | PathLike,
+    genome_status_json_file: str | PathLike,
+    genome_bgc_mappings_file: str | PathLike,
+    gnps_file_mappings_file: str | PathLike,
+    output_json_file: str | PathLike,
+) -> StrainCollection
+
+ +
+ +

Generate strain mappings JSON file for PODP pipeline.

+

To get the strain mappings, we need to combine the following mappings:

+
    +
  • strain_id <-> original_genome_id <-> resolved_genome_id <-> bgc_id
  • strain_id <-> MS_filename <-> spectrum_id
+

These mappings are extracted from the following files:

+
    +
  • "strain_id <-> original_genome_id" is extracted from podp_project_json_file.
  • +
  • "original_genome_id <-> resolved_genome_id" is extracted from genome_status_json_file.
  • +
  • "resolved_genome_id <-> bgc_id" is extracted from genome_bgc_mappings_file.
  • +
  • "strain_id <-> MS_filename" is extracted from podp_project_json_file.
  • +
  • "MS_filename <-> spectrum_id" is extracted from gnps_file_mappings_file.
  • +
+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
podp_project_json_file + str | PathLike + +
+

The path to the PODP project +JSON file.

+
+
+ required +
genome_status_json_file + str | PathLike + +
+

The path to the genome status +JSON file.

+
+
+ required +
genome_bgc_mappings_file + str | PathLike + +
+

The path to the genome BGC +mappings JSON file.

+
+
+ required +
gnps_file_mappings_file + str | PathLike + +
+

The path to the GNPS file +mappings file (csv or tsv).

+
+
+ required +
output_json_file + str | PathLike + +
+

The path to the output JSON file.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ StrainCollection + +
+

The strain mappings stored in a StrainCollection object.

+
+
+ + +
+ See Also +
    +
  • extract_mappings_strain_id_original_genome_id: Extract mappings "strain_id <-> original_genome_id".
  • extract_mappings_original_genome_id_resolved_genome_id: Extract mappings "original_genome_id <-> resolved_genome_id".
  • extract_mappings_resolved_genome_id_bgc_id: Extract mappings "resolved_genome_id <-> bgc_id".
  • get_mappings_strain_id_bgc_id: Get mappings "strain_id <-> bgc_id".
  • extract_mappings_strain_id_ms_filename: Extract mappings "strain_id <-> MS_filename".
  • extract_mappings_ms_filename_spectrum_id: Extract mappings "MS_filename <-> spectrum_id".
  • get_mappings_strain_id_spectrum_id: Get mappings "strain_id <-> spectrum_id".
+
+
+ Source code in src/nplinker/strain/utils.py +
def podp_generate_strain_mappings(
+    podp_project_json_file: str | PathLike,
+    genome_status_json_file: str | PathLike,
+    genome_bgc_mappings_file: str | PathLike,
+    gnps_file_mappings_file: str | PathLike,
+    output_json_file: str | PathLike,
+) -> StrainCollection:
+    """Generate strain mappings JSON file for PODP pipeline.
+
+    To get the strain mappings, we need to combine the following mappings:
+
+    - strain_id <-> original_genome_id <-> resolved_genome_id <-> bgc_id
+    - strain_id <-> MS_filename <-> spectrum_id
+
+    These mappings are extracted from the following files:
+
+    - "strain_id <-> original_genome_id" is extracted from `podp_project_json_file`.
+    - "original_genome_id <-> resolved_genome_id" is extracted from `genome_status_json_file`.
+    - "resolved_genome_id <-> bgc_id" is extracted from `genome_bgc_mappings_file`.
+    - "strain_id <-> MS_filename" is extracted from `podp_project_json_file`.
+    - "MS_filename <-> spectrum_id" is extracted from `gnps_file_mappings_file`.
+
+    Args:
+        podp_project_json_file: The path to the PODP project
+            JSON file.
+        genome_status_json_file: The path to the genome status
+            JSON file.
+        genome_bgc_mappings_file: The path to the genome BGC
+            mappings JSON file.
+        gnps_file_mappings_file: The path to the GNPS file
+            mappings file (csv or tsv).
+        output_json_file: The path to the output JSON file.
+
+    Returns:
+        The strain mappings stored in a StrainCollection object.
+
+    See Also:
+        - `extract_mappings_strain_id_original_genome_id`: Extract mappings
+            "strain_id <-> original_genome_id".
+        - `extract_mappings_original_genome_id_resolved_genome_id`: Extract mappings
+            "original_genome_id <-> resolved_genome_id".
+        - `extract_mappings_resolved_genome_id_bgc_id`: Extract mappings
+            "resolved_genome_id <-> bgc_id".
+        - `get_mappings_strain_id_bgc_id`: Get mappings "strain_id <-> bgc_id".
+        - `extract_mappings_strain_id_ms_filename`: Extract mappings
+            "strain_id <-> MS_filename".
+        - `extract_mappings_ms_filename_spectrum_id`: Extract mappings
+            "MS_filename <-> spectrum_id".
+        - `get_mappings_strain_id_spectrum_id`: Get mappings "strain_id <-> spectrum_id".
+    """
+    # Get mappings strain_id <-> original_genome_id <-> resolved_genome_id <-> bgc_id
+    mappings_strain_id_bgc_id = get_mappings_strain_id_bgc_id(
+        extract_mappings_strain_id_original_genome_id(podp_project_json_file),
+        extract_mappings_original_genome_id_resolved_genome_id(genome_status_json_file),
+        extract_mappings_resolved_genome_id_bgc_id(genome_bgc_mappings_file),
+    )
+
+    # Get mappings strain_id <-> MS_filename <-> spectrum_id
+    mappings_strain_id_spectrum_id = get_mappings_strain_id_spectrum_id(
+        extract_mappings_strain_id_ms_filename(podp_project_json_file),
+        extract_mappings_ms_filename_spectrum_id(gnps_file_mappings_file),
+    )
+
+    # Get mappings strain_id <-> bgc_id / spectrum_id
+    mappings = mappings_strain_id_bgc_id.copy()
+    for strain_id, spectrum_ids in mappings_strain_id_spectrum_id.items():
+        if strain_id in mappings:
+            mappings[strain_id].update(spectrum_ids)
+        else:
+            mappings[strain_id] = spectrum_ids.copy()
+
+    # Create StrainCollection
+    sc = StrainCollection()
+    for strain_id, bgc_ids in mappings.items():
+        if not sc.has_name(strain_id):
+            strain = Strain(strain_id)
+            for bgc_id in bgc_ids:
+                strain.add_alias(bgc_id)
+            sc.add(strain)
+        else:
+            # strain_list has only one element
+            strain_list = sc.lookup(strain_id)
+            for bgc_id in bgc_ids:
+                strain_list[0].add_alias(bgc_id)
+
+    # Write strain mappings JSON file
+    sc.to_json(output_json_file)
+    logger.info("Generated strain mappings JSON file: %s", output_json_file)
+
+    return sc
+
+
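A call sketch with placeholder paths (every file name below is hypothetical; only the argument names come from the signature above):
from nplinker.strain.utils import podp_generate_strain_mappings
+
+sc = podp_generate_strain_mappings(
+    podp_project_json_file="podp_project.json",
+    genome_status_json_file="genome_status.json",
+    genome_bgc_mappings_file="genome_bgc_mappings.json",
+    gnps_file_mappings_file="gnps_file_mappings.tsv",
+    output_json_file="strain_mappings.json",
+)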
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/2.0.0a3/api/utils/index.html b/2.0.0a3/api/utils/index.html new file mode 100644 index 00000000..c6d1a223 --- /dev/null +++ b/2.0.0a3/api/utils/index.html @@ -0,0 +1,3271 @@ + + + + + + + + + + + + + + + + + + + + + + + + + General Utilities - NPLinker + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

General Utilities

+ +
+ + + +

+ utils + + +

+ +
+ + + +
+ + + + + + + +
+ + + +

+ logger + + + + module-attribute + + +

+
logger = getLogger(__name__)
+
+ +
+
+ +
+ + + +
+ + +

+ calculate_md5 + + +

+
calculate_md5(
+    fpath: str | PathLike, chunk_size: int = 1024 * 1024
+) -> str
+
+ +
+ +

Calculate the MD5 checksum of a file.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fpath + str | PathLike + +
+

Path to the file.

+
+
+ required +
chunk_size + int + +
+

Chunk size for reading the file. Defaults to 1024*1024.

+
+
+ 1024 * 1024 +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

MD5 checksum of the file.

+
+
+ +
+ Source code in src/nplinker/utils.py +
def calculate_md5(fpath: str | PathLike, chunk_size: int = 1024 * 1024) -> str:
+    """Calculate the MD5 checksum of a file.
+
+    Args:
+        fpath: Path to the file.
+        chunk_size: Chunk size for reading the file. Defaults to 1024*1024.
+
+    Returns:
+        MD5 checksum of the file.
+    """
+    if sys.version_info >= (3, 9):
+        md5 = hashlib.md5(usedforsecurity=False)
+    else:
+        md5 = hashlib.md5()
+    with open(fpath, "rb") as f:
+        for chunk in iter(lambda: f.read(chunk_size), b""):
+            md5.update(chunk)
+    return md5.hexdigest()
+
+
+
+ +
+ +
+ + +

+ check_disk_space + + +

+
check_disk_space(func)
+
+ +
+ +

A decorator to check available disk space.

+

If the available disk space is less than 50GB, a warning is logged and a UserWarning is raised.

+ +
+ Source code in src/nplinker/utils.py +
38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
def check_disk_space(func):
+    """A decorator to check available disk space.
+
+    If the available disk space is less than 50GB, a warning is logged and a UserWarning is raised.
+    """
+
+    @functools.wraps(func)
+    def wrapper_check_disk_space(*args, **kwargs):
+        _, _, free = shutil.disk_usage("/")
+        free_gb = free // (2**30)
+        if free_gb < 50:
+            warning_message = f"Available disk space is {free_gb}GB. Is it enough for your project?"
+            logger.warning(warning_message)
+            warnings.warn(warning_message, UserWarning)
+        return func(*args, **kwargs)
+
+    return wrapper_check_disk_space
+
+
+
+ +
+ +
+ + +

+ check_md5 + + +

+
check_md5(fpath: str | PathLike, md5: str) -> bool
+
+ +
+ +

Verify the MD5 checksum of a file.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fpath + str | PathLike + +
+

Path to the file.

+
+
+ required +
md5 + str + +
+

MD5 checksum to verify.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ bool + +
+

True if the MD5 checksum matches, False otherwise.

+
+
+ +
+ Source code in src/nplinker/utils.py +
def check_md5(fpath: str | PathLike, md5: str) -> bool:
+    """Verify the MD5 checksum of a file.
+
+    Args:
+        fpath: Path to the file.
+        md5: MD5 checksum to verify.
+
+    Returns:
+        True if the MD5 checksum matches, False otherwise.
+    """
+    return md5 == calculate_md5(fpath)
+
+
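A quick sketch combining calculate_md5 and check_md5 (the file path is a placeholder):
from nplinker.utils import calculate_md5, check_md5
+
+md5 = calculate_md5("downloads/archive.zip")
+assert check_md5("downloads/archive.zip", md5)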
+
+ +
+ +
+ + +

+ download_and_extract_archive + + +

+
download_and_extract_archive(
+    url: str,
+    download_root: str | PathLike,
+    extract_root: str | Path | None = None,
+    filename: str | None = None,
+    md5: str | None = None,
+    remove_finished: bool = False,
+) -> None
+
+ +
+ +

Download a file from url and extract it.

+

This method is a wrapper of download_url and extract_archive methods.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
url + str + +
+

URL to download file from

+
+
+ required +
download_root + str | PathLike + +
+

Path to the directory to place downloaded +file in. If it doesn't exist, it will be created.

+
+
+ required +
extract_root + str | Path | None + +
+

Path to the directory the file +will be extracted to. The given directory will be created if not exist. +If omitted, the download_root is used.

+
+
+ None +
filename + str | None + +
+

Name to save the downloaded file under. +If None, use the basename of the URL

+
+
+ None +
md5 + str | None + +
+

MD5 checksum of the download. If None, do not check

+
+
+ None +
remove_finished + bool + +
+

If True, remove the downloaded file + after the extraction. Defaults to False.

+
+
+ False +
+ +
+ Source code in src/nplinker/utils.py +
def download_and_extract_archive(
+    url: str,
+    download_root: str | PathLike,
+    extract_root: str | Path | None = None,
+    filename: str | None = None,
+    md5: str | None = None,
+    remove_finished: bool = False,
+) -> None:
+    """Download a file from url and extract it.
+
+       This method is a wrapper of `download_url` and `extract_archive` methods.
+
+    Args:
+        url: URL to download file from
+        download_root: Path to the directory to place downloaded
+            file in. If it doesn't exist, it will be created.
+        extract_root: Path to the directory the file
+            will be extracted to. The given directory will be created if not exist.
+            If omitted, the `download_root` is used.
+        filename: Name to save the downloaded file under.
+            If None, use the basename of the URL
+        md5: MD5 checksum of the download. If None, do not check
+        remove_finished: If `True`, remove the downloaded file
+             after the extraction. Defaults to False.
+    """
+    download_root = Path(download_root)
+    if extract_root is None:
+        extract_root = download_root
+    else:
+        extract_root = Path(extract_root)
+    if not filename:
+        filename = Path(url).name
+
+    download_url(url, download_root, filename, md5)
+
+    archive = download_root / filename
+    extract_archive(archive, extract_root, remove_finished=remove_finished)
+
+
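For example (the URL is a placeholder, not a real dataset):
from nplinker.utils import download_and_extract_archive
+
+download_and_extract_archive(
+    url="https://example.org/dataset.tar.gz",
+    download_root="downloads",
+    extract_root="data",
+    md5=None,              # skip checksum verification
+    remove_finished=True,  # delete the archive after extraction
+)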
+
+ +
+ +
+ + +

+ download_url + + +

+
download_url(
+    url: str,
+    root: str | PathLike,
+    filename: str | None = None,
+    md5: str | None = None,
+    http_method: str = "GET",
+    allow_http_redirect: bool = True,
+) -> None
+
+ +
+ +

Download a file from a url and place it in root.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
url + str + +
+

URL to download file from

+
+
+ required +
root + str | PathLike + +
+

Directory to place downloaded file in. If it doesn't exist, it will be created.

+
+
+ required +
filename + str | None + +
+

Name to save the file under. If None, use the +basename of the URL.

+
+
+ None +
md5 + str | None + +
+

MD5 checksum of the download. If None, do not check.

+
+
+ None +
http_method + str + +
+

HTTP request method, e.g. "GET", "POST". +Defaults to "GET".

+
+
+ 'GET' +
allow_http_redirect + bool + +
+

If true, enable following redirects for all HTTP ("http:") methods.

+
+
+ True +
+ +
+ Source code in src/nplinker/utils.py +
@check_disk_space
+def download_url(
+    url: str,
+    root: str | PathLike,
+    filename: str | None = None,
+    md5: str | None = None,
+    http_method: str = "GET",
+    allow_http_redirect: bool = True,
+) -> None:
+    """Download a file from a url and place it in root.
+
+    Args:
+        url: URL to download file from
+        root: Directory to place downloaded file in. If it doesn't exist, it will be created.
+        filename: Name to save the file under. If None, use the
+            basename of the URL.
+        md5: MD5 checksum of the download. If None, do not check.
+        http_method: HTTP request method, e.g. "GET", "POST".
+            Defaults to "GET".
+        allow_http_redirect: If true, enable following redirects for all HTTP ("http:") methods.
+    """
+    root = transform_to_full_path(root)
+    # create the download directory if not exist
+    root.mkdir(exist_ok=True)
+    if not filename:
+        filename = Path(url).name
+    fpath = root / filename
+
+    # check if file is already present locally
+    if fpath.is_file() and md5 is not None and check_md5(fpath, md5):
+        logger.info("Using downloaded and verified file: " + str(fpath))
+        return
+
+    # download the file
+    logger.info(f"Downloading {filename} to {root}")
+    with open(fpath, "wb") as fh:
+        with httpx.stream(http_method, url, follow_redirects=allow_http_redirect) as response:
+            if not response.is_success:
+                fpath.unlink(missing_ok=True)
+                raise RuntimeError(
+                    f"Failed to download url {url} with status code {response.status_code}"
+                )
+            total = int(response.headers.get("Content-Length", 0))
+
+            with Progress(
+                TextColumn("[progress.description]{task.description}"),
+                BarColumn(bar_width=None),
+                "[progress.percentage]{task.percentage:>3.1f}%",
+                "•",
+                DownloadColumn(),
+                "•",
+                TransferSpeedColumn(),
+                "•",
+                TimeRemainingColumn(),
+                "•",
+                TimeElapsedColumn(),
+            ) as progress:
+                task = progress.add_task(f"[hot_pink]Downloading {fpath.name}", total=total)
+                for chunk in response.iter_bytes():
+                    fh.write(chunk)
+                    progress.update(task, advance=len(chunk))
+
+    # check integrity of downloaded file
+    if md5 is not None and not check_md5(fpath, md5):
+        raise RuntimeError("MD5 validation failed.")
+
+
+
+ +
+ +
+ + +

+ extract_archive + + +

+
extract_archive(
+    from_path: str | PathLike,
+    extract_root: str | PathLike | None = None,
+    members: list | None = None,
+    remove_finished: bool = False,
+) -> str
+
+ +
+ +

Extract an archive.

+

The archive type and a possible compression are automatically detected from +the file name. If the file is compressed but not an archive, the call is +dispatched to decompress.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
from_path + str | PathLike + +
+

Path to the file to be extracted.

+
+
+ required +
extract_root + str | PathLike | None + +
+

Path to the directory the file will be extracted to. +The given directory will be created if not exist. +If omitted, the directory of the archive file is used.

+
+
+ None +
members + list | None + +
+

Optional selection of members to extract. If not specified, +all members are extracted. +Members must be a subset of the list returned by +- zipfile.ZipFile.namelist() or a list of strings for zip file +- tarfile.TarFile.getmembers() for tar file

+
+
+ None +
remove_finished + bool + +
+

If True, remove the file after the extraction.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

Path to the directory the file was extracted to.

+
+
+ +
+ Source code in src/nplinker/utils.py +
def extract_archive(
+    from_path: str | PathLike,
+    extract_root: str | PathLike | None = None,
+    members: list | None = None,
+    remove_finished: bool = False,
+) -> str:
+    """Extract an archive.
+
+    The archive type and a possible compression are automatically detected from
+    the file name. If the file is compressed but not an archive, the call is
+    dispatched to :func:`decompress`.
+
+    Args:
+        from_path: Path to the file to be extracted.
+        extract_root: Path to the directory the file will be extracted to.
+            The given directory will be created if not exist.
+            If omitted, the directory of the archive file is used.
+        members: Optional selection of members to extract. If not specified,
+            all members are extracted.
+            Members must be a subset of the list returned by
+            - `zipfile.ZipFile.namelist()` or a list of strings for zip file
+            - `tarfile.TarFile.getmembers()` for tar file
+        remove_finished: If `True`, remove the file after the extraction.
+
+    Returns:
+        Path to the directory the file was extracted to.
+    """
+    from_path = Path(from_path)
+
+    if extract_root is None:
+        extract_root = from_path.parent
+    else:
+        extract_root = Path(extract_root)
+
+    # create the extract directory if not exist
+    extract_root.mkdir(exist_ok=True)
+
+    logger.info(f"Extracting {from_path} to {extract_root}")
+    suffix, archive_type, compression = _detect_file_type(from_path)
+    if not archive_type:
+        return _decompress(
+            from_path,
+            extract_root / from_path.name.replace(suffix, ""),
+            remove_finished=remove_finished,
+        )
+
+    extractor = _ARCHIVE_EXTRACTORS[archive_type]
+
+    extractor(str(from_path), str(extract_root), members, compression)
+    if remove_finished:
+        from_path.unlink()
+
+    return str(extract_root)
+
+
+
+ +
+ +
+ + +

+ is_file_format + + +

+
is_file_format(
+    file: str | PathLike, format: str = "tsv"
+) -> bool
+
+ +
+ +

Check if the file is in the given format.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file + str | PathLike + +
+

Path to the file to check.

+
+
+ required +
format + str + +
+

The format to check for, either "tsv" or "csv".

+
+
+ 'tsv' +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ bool + +
+

True if the file is in the given format, False otherwise.

+
+
+ +
+ Source code in src/nplinker/utils.py +
62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
def is_file_format(file: str | PathLike, format: str = "tsv") -> bool:
+    """Check if the file is in the given format.
+
+    Args:
+        file: Path to the file to check.
+        format: The format to check for, either "tsv" or "csv".
+
+    Returns:
+        True if the file is in the given format, False otherwise.
+    """
+    try:
+        with open(file, "rt") as f:
+            if format == "tsv":
+                reader = csv.reader(f, delimiter="\t")
+            elif format == "csv":
+                reader = csv.reader(f, delimiter=",")
+            else:
+                raise ValueError(f"Unknown format '{format}'.")
+            for _ in reader:
+                pass
+        return True
+    except csv.Error:
+        return False
+
+
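For example (a hypothetical file name):
from nplinker.utils import is_file_format
+
+if is_file_format("file_mappings.tsv", "tsv"):
+    print("file parses as TSV")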
+
+ +
+ +
+ + +

+ list_dirs + + +

+
list_dirs(
+    root: str | PathLike, keep_parent: bool = True
+) -> list[str]
+
+ +
+ +

List all directories at a given root.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
root + str | PathLike + +
+

Path to directory whose folders need to be listed

+
+
+ required +
keep_parent + bool + +
+

If true, prepends the path to each result, otherwise +only returns the name of the directories found

+
+
+ True +
+ +
+ Source code in src/nplinker/utils.py +
def list_dirs(root: str | PathLike, keep_parent: bool = True) -> list[str]:
+    """List all directories at a given root.
+
+    Args:
+        root: Path to directory whose folders need to be listed
+        keep_parent: If true, prepends the path to each result, otherwise
+            only returns the name of the directories found
+    """
+    root = transform_to_full_path(root)
+    directories = [str(p) for p in root.iterdir() if p.is_dir()]
+    if not keep_parent:
+        directories = [os.path.basename(d) for d in directories]
+    return directories
+
+
+
+ +
+ +
+ + +

+ list_files + + +

+
list_files(
+    root: str | PathLike,
+    prefix: str | tuple[str, ...] = "",
+    suffix: str | tuple[str, ...] = "",
+    keep_parent: bool = True,
+) -> list[str]
+
+ +
+ +

List all files at a given root.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
root + str | PathLike + +
+

Path to directory whose files need to be listed

+
+
+ required +
prefix + str | tuple[str, ...] + +
+

Prefix of the file names to match, +Defaults to empty string '""'.

+
+
+ '' +
suffix + str | tuple[str, ...] + +
+

Suffix of the files to match, e.g. ".png" or +(".jpg", ".png"). +Defaults to empty string '""'.

+
+
+ '' +
keep_parent + bool + +
+

If true, prepends the parent path to each +result, otherwise only returns the name of the files found. +Defaults to True.

+
+
+ True +
+ +
+ Source code in src/nplinker/utils.py +
def list_files(
+    root: str | PathLike,
+    prefix: str | tuple[str, ...] = "",
+    suffix: str | tuple[str, ...] = "",
+    keep_parent: bool = True,
+) -> list[str]:
+    """List all files at a given root.
+
+    Args:
+        root: Path to directory whose files need to be listed
+        prefix: Prefix of the file names to match,
+            Defaults to empty string '""'.
+        suffix: Suffix of the files to match, e.g. ".png" or
+            (".jpg", ".png").
+            Defaults to empty string '""'.
+        keep_parent: If true, prepends the parent path to each
+            result, otherwise only returns the name of the files found.
+            Defaults to True.
+    """
+    root = Path(root)
+    files = [
+        str(p)
+        for p in root.iterdir()
+        if p.is_file() and p.name.startswith(prefix) and p.name.endswith(suffix)
+    ]
+
+    if not keep_parent:
+        files = [os.path.basename(f) for f in files]
+
+    return files
+
+
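A usage sketch for the two listing helpers (the directory name is a placeholder):
from nplinker.utils import list_dirs, list_files
+
+subdir_names = list_dirs("antismash", keep_parent=False)
+gbk_paths = list_files("antismash", suffix=".gbk")  # full paths by default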
+
+ +
+ +
+ + +

+ transform_to_full_path + + +

+
transform_to_full_path(p: str | PathLike) -> Path
+
+ +
+ +

Transform a path to a full path.

+

The path is expanded (i.e. the ~ will be replaced with actual path) and converted to an +absolute path (i.e. . or .. will be replaced with actual path).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
p + str | PathLike + +
+

The path to transform.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Path + +
+

The transformed full path.

+
+
+ +
+ Source code in src/nplinker/utils.py +
def transform_to_full_path(p: str | PathLike) -> Path:
+    """Transform a path to a full path.
+
+    The path is expanded (i.e. the `~` will be replaced with actual path) and converted to an
+    absolute path (i.e. `.` or `..` will be replaced with actual path).
+
+    Args:
+        p: The path to transform.
+
+    Returns:
+        The transformed full path.
+    """
+    # Multiple calls to `Path` are used to ensure static typing compatibility.
+    p = Path(p).expanduser()
+    p = Path(p).resolve()
+    return Path(p)
+
+
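For example:
from nplinker.utils import transform_to_full_path
+
+print(transform_to_full_path("~/data/../downloads"))
+# e.g. /home/user/downloads on Linux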
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/2.0.0a3/assets/_mkdocstrings.css b/2.0.0a3/assets/_mkdocstrings.css new file mode 100644 index 00000000..85449ec7 --- /dev/null +++ b/2.0.0a3/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. */ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: "mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/2.0.0a3/assets/images/favicon.png b/2.0.0a3/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 
zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/2.0.0a3/assets/javascripts/bundle.081f42fc.min.js b/2.0.0a3/assets/javascripts/bundle.081f42fc.min.js new file mode 100644 index 00000000..32734cd3 --- /dev/null +++ b/2.0.0a3/assets/javascripts/bundle.081f42fc.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function 
f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var O=f()(_);return u("cut"),O},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(j,"px"),O.setAttribute("readonly",""),O.value=V,O}var te=function(_,O){var j=A(_);O.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,O):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(O){return typeof O}:H=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,j=O===void 0?"copy":O,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(O){return typeof O}:Ie=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var O=0;O<_.length;O++){var j=_[O];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,O){return _&&ro(V.prototype,_),O&&ro(V,O),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Wt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var Ri=function(V){Ci(O,V);var _=Hi(O);function O(j,D){var Y;return _i(this,O),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(O,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof 
D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),O}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var 
r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 
0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var M=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?v(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return M}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),B(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(b(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),b(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),b(r=>Go.pipe(v(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(b(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),b(t=>en.pipe(v(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(v(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),v(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(b(t=>t?M:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(G("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),v(t=>t.length>0),B(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),v(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(b(r=>r?t():M))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(b(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function G(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);G("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),b(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),b(c=>c?r:M),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(b(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(v(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else 
return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(v(c=>c),ee(s,(c,l)=>l),v(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(v(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(v(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(b(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(v(({active:s})=>s)),i.pipe(_e(250),v(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),v(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),v(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of 
Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?M:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):M})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),G("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||G("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),b(f=>f?l:M)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return G("content.lazy")?tt(e).pipe(v(n=>n),Te(1),b(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),v(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(v(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup 
rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),v(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),v(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),tt(e).pipe(b(()=>Na(n)),E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>G("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(b(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(v(({active:a})=>a)),i.pipe(_e(250),v(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!G("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),b(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let 
o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(v(()=>G("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?M:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(b(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return M;let r=e.target.closest("a");if(r===null)return M;if(r.target||e.metaKey||e.ctrlKey)return M;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):M}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(b(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),M}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return M;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),b(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),b(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),M)))),b(Xn),b(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(b(()=>e),Z("pathname"),b(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),b(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return 
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),b(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),b(n=>d(document.body,"click").pipe(v(i=>!i.metaKey&&!i.ctrlKey),ee(o),b(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?M:(i.preventDefault(),I(p))}}return M}),b(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),B(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),B(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(v(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),b(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),b(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?M:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(v(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g,"&nbsp;")),r.pipe(v(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(v(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(v(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(v(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(v(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),v(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>M),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return M}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>M),v(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(b(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),b(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),b(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,p.next.length-s.next.length)}))}function vi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),G("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(v(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return G("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(b(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),v(o=>o),m(()=>r),Te(1))),v(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(b(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(b(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(b(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),v(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),B(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;G("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(v(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),b(e=>Qn(e,{viewport$:Oe,header$:rt})),B(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>G("search.highlight")?mi(e,{index$:Mi,location$:jt}):M),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(b(()=>hs),Pe(ds),B(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.081f42fc.min.js.map + diff --git a/2.0.0a3/assets/javascripts/bundle.081f42fc.min.js.map b/2.0.0a3/assets/javascripts/bundle.081f42fc.min.js.map new file mode 100644 index 00000000..e055db5a --- /dev/null +++ b/2.0.0a3/assets/javascripts/bundle.081f42fc.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", 
"node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", 
"node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable<any> | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator<any, T> | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable<T>, subscriber: Subscriber<T>) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = <T>(subscribe?: (subscriber: Subscriber<T>) => TeardownLogic) => {\n return new Observable<T>(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift<R>(operator?: Operator<T, R>): Observable<R> {\n const observable = new Observable<R>();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial<Observer<T>> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it will be emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as is often\n * thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
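A minimal sketch of that synchronous delivery (an illustrative example added here, using only the standard `of` creator):\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * console.log('before');\n * of(42).subscribe(value => console.log('value', value));\n * console.log('after');\n *\n * // Logs 'before', then 'value 42', then 'after': the value is delivered\n * // before `subscribe` even returns.\n * ```\n *\n * 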
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate<T, R>(\n init: (liftedSource: Observable<T>, subscriber: Subscriber<R>) => (() => void) | void\n): OperatorFunction<T, R> {\n return (source: Observable<T>) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber<R>, liftedSource: Observable<T>) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and sent to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber<T>(\n destination: Subscriber<any>,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber<T> {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber<T> extends Subscriber<T> {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and sent to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed and stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule<T>(work: (this: SchedulerAction<T>, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor<T>(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array<AsyncAction<any>> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction<any>): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
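A minimal ordering sketch (an illustrative example, not part of the original docs):\n *\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * asyncScheduler.schedule(() => console.log('scheduled'));\n * console.log('sync');\n *\n * // Logs 'sync' first, then 'scheduled', because the scheduled task waits\n * // for the current synchronous code to finish.\n * ```\n *\n * 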
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" a task, that is, to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * a better choice is the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction<T> extends AsyncAction<T> {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instance of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules the given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
, "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Performs tasks when `window.requestAnimationFrame` would fire.\n *\n * When the `animationFrame` scheduler is used with a delay, it falls back to\n * {@link asyncScheduler} behaviour.\n *\n * Without a delay, the `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure that a scheduled task happens just before the next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html: <div></div>\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n *   div.style.height = height + \"px\";\n *\n *   this.schedule(height + 1); // `this` references the currently executing Action,\n *                              // which we reschedule with a new state\n * }, 0, 0);\n *\n * // You will see the div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification, and nothing else.\n *\n * ![](empty.png)\n *\n * It can be used for composing with other Observables, such as in a\n * {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n *   next: () => console.log('Next'),\n *   complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n *   mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Prints the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print 'a', 'b', 'c' (each on its own line)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable<never>((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n  return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n  return new Observable<never>((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n  return value && isFunction(value.schedule);\n}\n"
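, "// A brief sketch, not part of the original sources, of the replacement suggested by the\n// `empty` deprecation notice above: `scheduled([], scheduler)` emits only a scheduled\n// complete notification. Both `scheduled` and `asyncScheduler` are exported from 'rxjs'.\nimport { scheduled, asyncScheduler } from 'rxjs';\n\nscheduled([], asyncScheduler).subscribe({\n  complete: () => console.log('Complete!')\n});\n\n// Logs (asynchronously):\n// Complete!\n"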
, "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last<T>(arr: T[]): T | undefined {\n  return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n  return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n  return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n  return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike<any> => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thenable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike<any> {\n  return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessarily an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable<any> {\n  return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable<T>(obj: any): obj is AsyncIterable<T> {\n  return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n  // TODO: We should create error codes that can be looked up, so this can be less verbose.\n  return new TypeError(\n    `You provided ${\n      input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n    } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n  );\n}\n", "export function getSymbolIterator(): symbol {\n  if (typeof Symbol !== 'function' || !Symbol.iterator) {\n    return '@@iterator' as any;\n  }\n\n  return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable<any> {\n  return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator<T>(readableStream: ReadableStreamLike<T>): AsyncGenerator<T> {\n  const reader = readableStream.getReader();\n  try {\n    while (true) {\n      const { value, done } = await reader.read();\n      if (done) {\n        return;\n      }\n      yield value!;\n    }\n  } finally {\n    reader.releaseLock();\n  }\n}\n\nexport function isReadableStreamLike<T>(obj: any): obj is ReadableStreamLike<T> {\n  // We don't want to use instanceof checks because they would return\n  // false for instances from another Realm, like an