From 3dbfda123bdf1c5326be631114406b6edea6f7fd Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Mon, 10 Nov 2025 06:03:25 +0000
Subject: [PATCH] Auto-generated API code
---
elasticsearch/_async/client/__init__.py | 4 +-
elasticsearch/_async/client/cat.py | 38 ++
elasticsearch/_async/client/cluster.py | 35 +-
elasticsearch/_async/client/indices.py | 331 +++++++++++++++++-
elasticsearch/_async/client/inference.py | 122 ++++---
elasticsearch/_async/client/nodes.py | 172 ++++++++-
elasticsearch/_async/client/security.py | 5 +-
elasticsearch/_async/client/snapshot.py | 2 +-
elasticsearch/_async/client/text_structure.py | 4 +-
elasticsearch/_sync/client/__init__.py | 4 +-
elasticsearch/_sync/client/cat.py | 38 ++
elasticsearch/_sync/client/cluster.py | 35 +-
elasticsearch/_sync/client/indices.py | 331 +++++++++++++++++-
elasticsearch/_sync/client/inference.py | 122 ++++---
elasticsearch/_sync/client/nodes.py | 172 ++++++++-
elasticsearch/_sync/client/security.py | 5 +-
elasticsearch/_sync/client/snapshot.py | 2 +-
elasticsearch/_sync/client/text_structure.py | 4 +-
elasticsearch/_version.py | 2 +-
elasticsearch/dsl/aggs.py | 18 +-
20 files changed, 1278 insertions(+), 168 deletions(-)
diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py
index 8c7466a45..aaa897367 100644
--- a/elasticsearch/_async/client/__init__.py
+++ b/elasticsearch/_async/client/__init__.py
@@ -2133,7 +2133,7 @@ async def field_caps(
] = None,
fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- filters: t.Optional[str] = None,
+ filters: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
ignore_unavailable: t.Optional[bool] = None,
include_empty_fields: t.Optional[bool] = None,
@@ -5599,7 +5599,7 @@ async def search_template(
async def terms_enum(
self,
*,
- index: str,
+ index: t.Union[str, t.Sequence[str]],
field: t.Optional[str] = None,
case_insensitive: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py
index 8eff2a08c..ee09e8cfa 100644
--- a/elasticsearch/_async/client/cat.py
+++ b/elasticsearch/_async/client/cat.py
@@ -3310,10 +3310,20 @@ async def segments(
self,
*,
index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ allow_closed: t.Optional[bool] = None,
+ allow_no_indices: t.Optional[bool] = None,
bytes: t.Optional[
t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
] = None,
error_trace: t.Optional[bool] = None,
+ expand_wildcards: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+ ],
+ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+ ]
+ ] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
h: t.Optional[
@@ -3364,6 +3374,8 @@ async def segments(
] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
+ ignore_throttled: t.Optional[bool] = None,
+ ignore_unavailable: t.Optional[bool] = None,
local: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
pretty: t.Optional[bool] = None,
@@ -3387,6 +3399,14 @@ async def segments(
:param index: A comma-separated list of data streams, indices, and aliases used
to limit the request. Supports wildcards (`*`). To target all data streams
and indices, omit this parameter or use `*` or `_all`.
+ :param allow_closed: If true, allow closed indices to be returned in the response;
+ otherwise, if false, keep the legacy behaviour of throwing an exception if
+ the index pattern matches closed indices
+ :param allow_no_indices: If false, the request returns an error if any wildcard
+ expression, index alias, or _all value targets only missing or closed indices.
+ This behavior applies even if the request targets other open indices. For
+ example, a request targeting foo*,bar* returns an error if an index starts
+ with foo but no index starts with bar.
:param bytes: Sets the units for columns that contain a byte-size value. Note
that byte-size value units work in terms of powers of 1024. For instance
`1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
@@ -3395,12 +3415,20 @@ async def segments(
least `1.0`. If given, byte-size values are rendered as an integer with no
suffix, representing the value of the column in the chosen unit. Values that
are not an exact multiple of the chosen unit are rounded down.
+ :param expand_wildcards: Type of index that wildcard expressions can match. If
+ the request can target data streams, this argument determines whether wildcard
+ expressions match hidden data streams. Supports comma-separated values, such
+ as open,hidden.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
:param h: A comma-separated list of columns names to display. It supports simple
wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
+ :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored
+ when frozen.
+ :param ignore_unavailable: If true, missing or closed indices are not included
+ in the response.
:param local: If `true`, the request computes the list of selected nodes from
the local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
@@ -3425,10 +3453,16 @@ async def segments(
__path_parts = {}
__path = "/_cat/segments"
__query: t.Dict[str, t.Any] = {}
+ if allow_closed is not None:
+ __query["allow_closed"] = allow_closed
+ if allow_no_indices is not None:
+ __query["allow_no_indices"] = allow_no_indices
if bytes is not None:
__query["bytes"] = bytes
if error_trace is not None:
__query["error_trace"] = error_trace
+ if expand_wildcards is not None:
+ __query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if format is not None:
@@ -3439,6 +3473,10 @@ async def segments(
__query["help"] = help
if human is not None:
__query["human"] = human
+ if ignore_throttled is not None:
+ __query["ignore_throttled"] = ignore_throttled
+ if ignore_unavailable is not None:
+ __query["ignore_unavailable"] = ignore_unavailable
if local is not None:
__query["local"] = local
if master_timeout is not None:
diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py
index a6efa8529..929b16021 100644
--- a/elasticsearch/_async/client/cluster.py
+++ b/elasticsearch/_async/client/cluster.py
@@ -1068,7 +1068,40 @@ async def reroute(
async def state(
self,
*,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "blocks",
+ "customs",
+ "master_node",
+ "metadata",
+ "nodes",
+ "routing_nodes",
+ "routing_table",
+ "version",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "blocks",
+ "customs",
+ "master_node",
+ "metadata",
+ "nodes",
+ "routing_nodes",
+ "routing_table",
+ "version",
+ ],
+ ],
+ ]
+ ] = None,
index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
allow_no_indices: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py
index 81be48d28..873348a0a 100644
--- a/elasticsearch/_async/client/indices.py
+++ b/elasticsearch/_async/client/indices.py
@@ -35,7 +35,7 @@ class IndicesClient(NamespacedClient):
async def add_block(
self,
*,
- index: str,
+ index: t.Union[str, t.Sequence[str]],
block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]],
allow_no_indices: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
@@ -244,7 +244,6 @@ async def analyze(
)
@_rewrite_parameters()
- @_stability_warning(Stability.EXPERIMENTAL)
async def cancel_migrate_reindex(
self,
*,
@@ -778,7 +777,6 @@ async def create_data_stream(
@_rewrite_parameters(
body_name="create_from",
)
- @_stability_warning(Stability.EXPERIMENTAL)
async def create_from(
self,
*,
@@ -844,7 +842,7 @@ async def create_from(
async def data_streams_stats(
self,
*,
- name: t.Optional[str] = None,
+ name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
error_trace: t.Optional[bool] = None,
expand_wildcards: t.Optional[
t.Union[
@@ -1300,6 +1298,62 @@ async def delete_index_template(
path_parts=__path_parts,
)
+ @_rewrite_parameters()
+ @_stability_warning(Stability.EXPERIMENTAL)
+ async def delete_sample_configuration(
+ self,
+ *,
+ index: str,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Delete sampling configuration.
+ Delete the sampling configuration for the specified index.
+
+
+ ``_
+
+ :param index: The name of the index.
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ :param timeout: Period to wait for a response. If no response is received before
+ the timeout expires, the request fails and returns an error.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'index'")
+ __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+ __path = f'/{__path_parts["index"]}/_sample/config'
+ __query: t.Dict[str, t.Any] = {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ __headers = {"accept": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "DELETE",
+ __path,
+ params=__query,
+ headers=__headers,
+ endpoint_id="indices.delete_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters()
async def delete_template(
self,
@@ -1467,12 +1521,17 @@ async def downsample(
.. raw:: html
Downsample an index.
- Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval.
- For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
+ Downsamples a time series (TSDS) index and reduces its size by keeping the last value or by pre-aggregating metrics:
+
+ - When running in aggregate mode, it pre-calculates and stores statistical summaries (min, max, sum, value_count and avg)
+ for each metric field grouped by a configured time interval and their dimensions.
+ - When running in last_value mode, it keeps the last value for each metric in the configured interval and their dimensions.
+
+ For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
All documents within an hour interval are summarized and stored as a single document in the downsample index.
NOTE: Only indices in a time series data stream are supported.
Neither field nor document level security can be defined on the source index.
- The source index must be read only (index.blocks.write: true).
+ The source index must be read-only (index.blocks.write: true).
``_
@@ -2355,6 +2414,53 @@ async def get_alias(
path_parts=__path_parts,
)
+ @_rewrite_parameters()
+ @_stability_warning(Stability.EXPERIMENTAL)
+ async def get_all_sample_configuration(
+ self,
+ *,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ pretty: t.Optional[bool] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Get all sampling configurations.
+ Get the sampling configurations for all indices.
+
+
+ ``_
+
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ """
+ __path_parts: t.Dict[str, str] = {}
+ __path = "/_sample/config"
+ __query: t.Dict[str, t.Any] = {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ __headers = {"accept": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "GET",
+ __path,
+ params=__query,
+ headers=__headers,
+ endpoint_id="indices.get_all_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters()
async def get_data_lifecycle(
self,
@@ -2815,8 +2921,8 @@ async def get_index_template(
``_
- :param name: Comma-separated list of index template names used to limit the request.
- Wildcard (*) expressions are supported.
+ :param name: Name of index template to retrieve. Wildcard (*) expressions are
+ supported.
:param flat_settings: If true, returns settings in flat format.
:param include_defaults: If true, returns all relevant default configurations
for the index template.
@@ -2947,7 +3053,6 @@ async def get_mapping(
)
@_rewrite_parameters()
- @_stability_warning(Stability.EXPERIMENTAL)
async def get_migrate_reindex_status(
self,
*,
@@ -3035,6 +3140,57 @@ async def get_sample(
path_parts=__path_parts,
)
+ @_rewrite_parameters()
+ @_stability_warning(Stability.EXPERIMENTAL)
+ async def get_sample_configuration(
+ self,
+ *,
+ index: str,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ pretty: t.Optional[bool] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Get sampling configuration.
+ Get the sampling configuration for the specified index.
+
+
+ ``_
+
+ :param index: The name of the index.
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'index'")
+ __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+ __path = f'/{__path_parts["index"]}/_sample/config'
+ __query: t.Dict[str, t.Any] = {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ __headers = {"accept": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "GET",
+ __path,
+ params=__query,
+ headers=__headers,
+ endpoint_id="indices.get_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters()
@_stability_warning(Stability.EXPERIMENTAL)
async def get_sample_stats(
@@ -3251,7 +3407,6 @@ async def get_template(
@_rewrite_parameters(
body_name="reindex",
)
- @_stability_warning(Stability.EXPERIMENTAL)
async def migrate_reindex(
self,
*,
@@ -3701,7 +3856,7 @@ async def put_data_lifecycle(
*,
name: t.Union[str, t.Sequence[str]],
data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
- downsampling: t.Optional[t.Mapping[str, t.Any]] = None,
+ downsampling: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None,
enabled: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
expand_wildcards: t.Optional[
@@ -4357,6 +4512,95 @@ async def put_mapping(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("rate", "if_", "max_samples", "max_size", "time_to_live"),
+ parameter_aliases={"if": "if_"},
+ )
+ @_stability_warning(Stability.EXPERIMENTAL)
+ async def put_sample_configuration(
+ self,
+ *,
+ index: str,
+ rate: t.Optional[t.Union[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ if_: t.Optional[str] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ max_samples: t.Optional[int] = None,
+ max_size: t.Optional[t.Union[int, str]] = None,
+ pretty: t.Optional[bool] = None,
+ time_to_live: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create or update sampling configuration.
+ Create or update the sampling configuration for the specified index.
+
+
+ ``_
+
+ :param index: The name of the index or data stream.
+ :param rate: The fraction of documents to sample. Must be greater than 0 and
+ less than or equal to 1. Can be specified as a number or a string.
+ :param if_: An optional condition script that sampled documents must satisfy.
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ :param max_samples: The maximum number of documents to sample. Must be greater
+ than 0 and less than or equal to 10,000.
+ :param max_size: The maximum total size of sampled documents. Must be greater
+ than 0 and less than or equal to 5GB.
+ :param time_to_live: The duration for which the sampled documents should be retained.
+ Must be greater than 0 and less than or equal to 30 days.
+ :param timeout: Period to wait for a response. If no response is received before
+ the timeout expires, the request fails and returns an error.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'index'")
+ if rate is None and body is None:
+ raise ValueError("Empty value passed for parameter 'rate'")
+ __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+ __path = f'/{__path_parts["index"]}/_sample/config'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if rate is not None:
+ __body["rate"] = rate
+ if if_ is not None:
+ __body["if"] = if_
+ if max_samples is not None:
+ __body["max_samples"] = max_samples
+ if max_size is not None:
+ __body["max_size"] = max_size
+ if time_to_live is not None:
+ __body["time_to_live"] = time_to_live
+ __headers = {"accept": "application/json", "content-type": "application/json"}
+ return await self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="indices.put_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_name="settings",
)
@@ -4893,7 +5137,7 @@ async def reload_search_analyzers(
async def remove_block(
self,
*,
- index: str,
+ index: t.Union[str, t.Sequence[str]],
block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]],
allow_no_indices: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
@@ -5994,7 +6238,66 @@ async def stats(
self,
*,
index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ],
+ ]
+ ] = None,
completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
error_trace: t.Optional[bool] = None,
expand_wildcards: t.Optional[
diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py
index 5412c863b..8cf4f1a28 100644
--- a/elasticsearch/_async/client/inference.py
+++ b/elasticsearch/_async/client/inference.py
@@ -55,7 +55,9 @@ async def completion(
:param inference_id: The inference Id
:param input: Inference input. Either a string or an array of strings.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
:param timeout: Specifies the amount of time to wait for the inference request
to complete.
"""
@@ -120,15 +122,17 @@ async def delete(
"""
.. raw:: html
- Delete an inference endpoint
+ Delete an inference endpoint
+ This API requires the manage_inference cluster privilege (the built-in inference_admin role grants this privilege).
``_
:param inference_id: The inference identifier.
:param task_type: The task type
- :param dry_run: When true, the endpoint is not deleted and a list of ingest processors
- which reference this endpoint is returned.
+ :param dry_run: When true, checks the semantic_text fields and inference processors
+ that reference the endpoint and returns them in a list, but does not delete
+ the endpoint.
:param force: When true, the inference endpoint is forcefully deleted even if
it is still being used by ingest processors or semantic text fields.
"""
@@ -194,7 +198,8 @@ async def get(
"""
.. raw:: html
- Get an inference endpoint
+ Get an inference endpoint
+ This API requires the monitor_inference cluster privilege (the built-in inference_admin and inference_user roles grant this privilege).
``_
@@ -548,7 +553,7 @@ async def put_alibabacloud(
self,
*,
task_type: t.Union[
- str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"]
+ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
],
alibabacloud_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None,
@@ -577,7 +582,9 @@ async def put_alibabacloud(
this case, `alibabacloud-ai-search`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `alibabacloud-ai-search` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank` or `completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -673,7 +680,8 @@ async def put_amazonbedrock(
this case, `amazonbedrock`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `amazonbedrock` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -775,7 +783,9 @@ async def put_amazonsagemaker(
:param service_settings: Settings used to install the inference model. These
settings are specific to the `amazon_sagemaker` service and `service_settings.api`
you specified.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank`, `completion`, or `chat_completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type and `service_settings.api` you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -829,12 +839,7 @@ async def put_amazonsagemaker(
)
@_rewrite_parameters(
- body_fields=(
- "service",
- "service_settings",
- "chunking_settings",
- "task_settings",
- ),
+ body_fields=("service", "service_settings", "task_settings"),
)
async def put_anthropic(
self,
@@ -843,7 +848,6 @@ async def put_anthropic(
anthropic_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -867,8 +871,7 @@ async def put_anthropic(
:param service: The type of service supported for the specified task type. In
this case, `anthropic`.
:param service_settings: Settings used to install the inference model. These
- settings are specific to the `watsonxai` service.
- :param chunking_settings: The chunking configuration object.
+ settings are specific to the `anthropic` service.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -906,8 +909,6 @@ async def put_anthropic(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
if task_settings is not None:
__body["task_settings"] = task_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
@@ -959,8 +960,10 @@ async def put_azureaistudio(
:param service: The type of service supported for the specified task type. In
this case, `azureaistudio`.
:param service_settings: Settings used to install the inference model. These
- settings are specific to the `openai` service.
- :param chunking_settings: The chunking configuration object.
+ settings are specific to the `azureaistudio` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` or `completion`
+ task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1060,7 +1063,8 @@ async def put_azureopenai(
this case, `azureopenai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `azureopenai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1152,7 +1156,9 @@ async def put_cohere(
this case, `cohere`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `cohere` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` or `completion`
+ task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1204,12 +1210,7 @@ async def put_cohere(
)
@_rewrite_parameters(
- body_fields=(
- "service",
- "service_settings",
- "chunking_settings",
- "task_settings",
- ),
+ body_fields=("service", "service_settings", "task_settings"),
)
async def put_contextualai(
self,
@@ -1218,7 +1219,6 @@ async def put_contextualai(
contextualai_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["contextualai"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -1243,7 +1243,6 @@ async def put_contextualai(
this case, `contextualai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `contextualai` service.
- :param chunking_settings: The chunking configuration object.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1281,8 +1280,6 @@ async def put_contextualai(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
if task_settings is not None:
__body["task_settings"] = task_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
@@ -1376,7 +1373,9 @@ async def put_custom(
this case, `custom`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `custom` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank` or `completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
"""
@@ -1424,7 +1423,7 @@ async def put_custom(
)
@_rewrite_parameters(
- body_fields=("service", "service_settings", "chunking_settings"),
+ body_fields=("service", "service_settings"),
)
async def put_deepseek(
self,
@@ -1433,7 +1432,6 @@ async def put_deepseek(
deepseek_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -1456,7 +1454,6 @@ async def put_deepseek(
this case, `deepseek`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `deepseek` service.
- :param chunking_settings: The chunking configuration object.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -1490,8 +1487,6 @@ async def put_deepseek(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self.perform_request( # type: ignore[return-value]
"PUT",
@@ -1560,7 +1555,7 @@ async def put_elasticsearch(
settings are specific to the `elasticsearch` service.
:param chunking_settings: The chunking configuration object. Applies only to
the `sparse_embedding` and `text_embedding` task types. Not applicable to
- the `rerank`, `completion`, or `chat_completion` task types.
+ the `rerank` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1741,7 +1736,8 @@ async def put_googleaistudio(
this case, `googleaistudio`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `googleaistudio` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -1831,7 +1827,9 @@ async def put_googlevertexai(
this case, `googlevertexai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `googlevertexai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1959,7 +1957,9 @@ async def put_hugging_face(
this case, `hugging_face`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `hugging_face` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2053,7 +2053,8 @@ async def put_jinaai(
this case, `jinaai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `jinaai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2139,7 +2140,9 @@ async def put_llama(
this case, `llama`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `llama` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -2221,7 +2224,9 @@ async def put_mistral(
this case, `mistral`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `mistral` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -2311,7 +2316,9 @@ async def put_openai(
this case, `openai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `openai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2402,7 +2409,8 @@ async def put_voyageai(
this case, `voyageai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `voyageai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2454,7 +2462,7 @@ async def put_voyageai(
)
@_rewrite_parameters(
- body_fields=("service", "service_settings"),
+ body_fields=("service", "service_settings", "chunking_settings"),
)
async def put_watsonx(
self,
@@ -2465,6 +2473,7 @@ async def put_watsonx(
watsonx_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -2489,6 +2498,9 @@ async def put_watsonx(
this case, `watsonxai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `watsonxai` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -2522,6 +2534,8 @@ async def put_watsonx(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self.perform_request( # type: ignore[return-value]
"PUT",
@@ -2638,7 +2652,9 @@ async def sparse_embedding(
:param inference_id: The inference Id
:param input: Inference input. Either a string or an array of strings.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
:param timeout: Specifies the amount of time to wait for the inference request
to complete.
"""
@@ -2710,7 +2726,9 @@ async def text_embedding(
to the relevant service-specific documentation for more info. > info > The
`input_type` parameter specified on the root level of the request body will
take precedence over the `input_type` parameter specified in `task_settings`.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+ settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
:param timeout: Specifies the amount of time to wait for the inference request
to complete.
"""
diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py
index 1ed5cd5eb..be18489b2 100644
--- a/elasticsearch/_async/client/nodes.py
+++ b/elasticsearch/_async/client/nodes.py
@@ -220,7 +220,50 @@ async def info(
self,
*,
node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "aggregations",
+ "http",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "plugins",
+ "process",
+ "remote_cluster_server",
+ "settings",
+ "thread_pool",
+ "transport",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "aggregations",
+ "http",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "plugins",
+ "process",
+ "remote_cluster_server",
+ "settings",
+ "thread_pool",
+ "transport",
+ ],
+ ],
+ ]
+ ] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
flat_settings: t.Optional[bool] = None,
@@ -357,8 +400,120 @@ async def stats(
self,
*,
node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- index_metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "adaptive_selection",
+ "allocations",
+ "breaker",
+ "discovery",
+ "fs",
+ "http",
+ "indexing_pressure",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "process",
+ "repositories",
+ "script",
+ "script_cache",
+ "thread_pool",
+ "transport",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "adaptive_selection",
+ "allocations",
+ "breaker",
+ "discovery",
+ "fs",
+ "http",
+ "indexing_pressure",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "process",
+ "repositories",
+ "script",
+ "script_cache",
+ "thread_pool",
+ "transport",
+ ],
+ ],
+ ]
+ ] = None,
+ index_metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ],
+ ]
+ ] = None,
completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
error_trace: t.Optional[bool] = None,
fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -483,7 +638,14 @@ async def usage(
self,
*,
node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["_all", "aggregations", "rest_actions"]]
+ ],
+ t.Union[str, t.Literal["_all", "aggregations", "rest_actions"]],
+ ]
+ ] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -502,7 +664,7 @@ async def usage(
information; use `_local` to return information from the node you're connecting
to, leave empty to get information from all nodes
:param metric: Limits the information returned to the specific metrics. A comma-separated
- list of the following options: `_all`, `rest_actions`.
+ list of the following options: `_all`, `rest_actions`, `aggregations`.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py
index b01be9e93..6830c97ee 100644
--- a/elasticsearch/_async/client/security.py
+++ b/elasticsearch/_async/client/security.py
@@ -477,7 +477,7 @@ async def clear_api_key_cache(
async def clear_cached_privileges(
self,
*,
- application: str,
+ application: t.Union[str, t.Sequence[str]],
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -3763,7 +3763,8 @@ async def query_role(
:param size: The number of hits to return. It must not be negative. By default,
you cannot page through more than 10,000 hits using the `from` and `size`
parameters. To page through more hits, use the `search_after` parameter.
- :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`.
+ :param sort: The sort definition. You can sort on `name`, `description`, `metadata`,
+ `applications.application`, `applications.privileges`, and `applications.resources`.
In addition, sort can also be applied to the `_doc` field to sort by index
order.
"""
diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py
index 249bb0100..afe3aabb8 100644
--- a/elasticsearch/_async/client/snapshot.py
+++ b/elasticsearch/_async/client/snapshot.py
@@ -397,7 +397,7 @@ async def delete(
self,
*,
repository: str,
- snapshot: str,
+ snapshot: t.Union[str, t.Sequence[str]],
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py
index 6307f20bb..e5c0686b3 100644
--- a/elasticsearch/_async/client/text_structure.py
+++ b/elasticsearch/_async/client/text_structure.py
@@ -31,7 +31,7 @@ async def find_field_structure(
*,
field: str,
index: str,
- column_names: t.Optional[str] = None,
+ column_names: t.Optional[t.Union[str, t.Sequence[str]]] = None,
delimiter: t.Optional[str] = None,
documents_to_sample: t.Optional[int] = None,
ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None,
@@ -217,7 +217,7 @@ async def find_message_structure(
self,
*,
messages: t.Optional[t.Sequence[str]] = None,
- column_names: t.Optional[str] = None,
+ column_names: t.Optional[t.Union[str, t.Sequence[str]]] = None,
delimiter: t.Optional[str] = None,
ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None,
error_trace: t.Optional[bool] = None,
diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py
index 498f411c1..72024a85e 100644
--- a/elasticsearch/_sync/client/__init__.py
+++ b/elasticsearch/_sync/client/__init__.py
@@ -2131,7 +2131,7 @@ def field_caps(
] = None,
fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- filters: t.Optional[str] = None,
+ filters: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
ignore_unavailable: t.Optional[bool] = None,
include_empty_fields: t.Optional[bool] = None,
@@ -5597,7 +5597,7 @@ def search_template(
def terms_enum(
self,
*,
- index: str,
+ index: t.Union[str, t.Sequence[str]],
field: t.Optional[str] = None,
case_insensitive: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py
index 102a28d7b..01399bea0 100644
--- a/elasticsearch/_sync/client/cat.py
+++ b/elasticsearch/_sync/client/cat.py
@@ -3310,10 +3310,20 @@ def segments(
self,
*,
index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ allow_closed: t.Optional[bool] = None,
+ allow_no_indices: t.Optional[bool] = None,
bytes: t.Optional[
t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
] = None,
error_trace: t.Optional[bool] = None,
+ expand_wildcards: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+ ],
+ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+ ]
+ ] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
h: t.Optional[
@@ -3364,6 +3374,8 @@ def segments(
] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
+ ignore_throttled: t.Optional[bool] = None,
+ ignore_unavailable: t.Optional[bool] = None,
local: t.Optional[bool] = None,
master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
pretty: t.Optional[bool] = None,
@@ -3387,6 +3399,14 @@ def segments(
:param index: A comma-separated list of data streams, indices, and aliases used
to limit the request. Supports wildcards (`*`). To target all data streams
and indices, omit this parameter or use `*` or `_all`.
+ :param allow_closed: If true, allow closed indices to be returned in the response;
+ otherwise, if false, keep the legacy behaviour of throwing an exception if
+ the index pattern matches closed indices.
+ :param allow_no_indices: If false, the request returns an error if any wildcard
+ expression, index alias, or _all value targets only missing or closed indices.
+ This behavior applies even if the request targets other open indices. For
+ example, a request targeting foo*,bar* returns an error if an index starts
+ with foo but no index starts with bar.
:param bytes: Sets the units for columns that contain a byte-size value. Note
that byte-size value units work in terms of powers of 1024. For instance
`1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
@@ -3395,12 +3415,20 @@ def segments(
least `1.0`. If given, byte-size values are rendered as an integer with no
suffix, representing the value of the column in the chosen unit. Values that
are not an exact multiple of the chosen unit are rounded down.
+ :param expand_wildcards: Type of index that wildcard expressions can match. If
+ the request can target data streams, this argument determines whether wildcard
+ expressions match hidden data streams. Supports comma-separated values, such
+ as open,hidden.
:param format: Specifies the format to return the columnar data in, can be set
to `text`, `json`, `cbor`, `yaml`, or `smile`.
:param h: A comma-separated list of columns names to display. It supports simple
wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
+ :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored
+ when frozen.
+ :param ignore_unavailable: If true, missing or closed indices are not included
+ in the response.
:param local: If `true`, the request computes the list of selected nodes from
the local cluster state. If `false` the list of selected nodes are computed
from the cluster state of the master node. In both cases the coordinating
@@ -3425,10 +3453,16 @@ def segments(
__path_parts = {}
__path = "/_cat/segments"
__query: t.Dict[str, t.Any] = {}
+ if allow_closed is not None:
+ __query["allow_closed"] = allow_closed
+ if allow_no_indices is not None:
+ __query["allow_no_indices"] = allow_no_indices
if bytes is not None:
__query["bytes"] = bytes
if error_trace is not None:
__query["error_trace"] = error_trace
+ if expand_wildcards is not None:
+ __query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if format is not None:
@@ -3439,6 +3473,10 @@ def segments(
__query["help"] = help
if human is not None:
__query["human"] = human
+ if ignore_throttled is not None:
+ __query["ignore_throttled"] = ignore_throttled
+ if ignore_unavailable is not None:
+ __query["ignore_unavailable"] = ignore_unavailable
if local is not None:
__query["local"] = local
if master_timeout is not None:
diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py
index 46cb6059f..68b380663 100644
--- a/elasticsearch/_sync/client/cluster.py
+++ b/elasticsearch/_sync/client/cluster.py
@@ -1068,7 +1068,40 @@ def reroute(
def state(
self,
*,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "blocks",
+ "customs",
+ "master_node",
+ "metadata",
+ "nodes",
+ "routing_nodes",
+ "routing_table",
+ "version",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "blocks",
+ "customs",
+ "master_node",
+ "metadata",
+ "nodes",
+ "routing_nodes",
+ "routing_table",
+ "version",
+ ],
+ ],
+ ]
+ ] = None,
index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
allow_no_indices: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py
index f249d2dd8..5ca5bd4a2 100644
--- a/elasticsearch/_sync/client/indices.py
+++ b/elasticsearch/_sync/client/indices.py
@@ -35,7 +35,7 @@ class IndicesClient(NamespacedClient):
def add_block(
self,
*,
- index: str,
+ index: t.Union[str, t.Sequence[str]],
block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]],
allow_no_indices: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
@@ -244,7 +244,6 @@ def analyze(
)
@_rewrite_parameters()
- @_stability_warning(Stability.EXPERIMENTAL)
def cancel_migrate_reindex(
self,
*,
@@ -778,7 +777,6 @@ def create_data_stream(
@_rewrite_parameters(
body_name="create_from",
)
- @_stability_warning(Stability.EXPERIMENTAL)
def create_from(
self,
*,
@@ -844,7 +842,7 @@ def create_from(
def data_streams_stats(
self,
*,
- name: t.Optional[str] = None,
+ name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
error_trace: t.Optional[bool] = None,
expand_wildcards: t.Optional[
t.Union[
@@ -1300,6 +1298,62 @@ def delete_index_template(
path_parts=__path_parts,
)
+ @_rewrite_parameters()
+ @_stability_warning(Stability.EXPERIMENTAL)
+ def delete_sample_configuration(
+ self,
+ *,
+ index: str,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ pretty: t.Optional[bool] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Delete sampling configuration.
+ Delete the sampling configuration for the specified index.
+
+
+ ``_
+
+ :param index: The name of the index.
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ :param timeout: Period to wait for a response. If no response is received before
+ the timeout expires, the request fails and returns an error.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'index'")
+ __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+ __path = f'/{__path_parts["index"]}/_sample/config'
+ __query: t.Dict[str, t.Any] = {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ __headers = {"accept": "application/json"}
+ return self.perform_request( # type: ignore[return-value]
+ "DELETE",
+ __path,
+ params=__query,
+ headers=__headers,
+ endpoint_id="indices.delete_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters()
def delete_template(
self,
@@ -1467,12 +1521,17 @@ def downsample(
.. raw:: html
Downsample an index.
- Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval.
- For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
+ Downsamples a time series (TSDS) index and reduces its size by keeping the last value or by pre-aggregating metrics:
+
+ - When running in ``aggregate`` mode, it pre-calculates and stores statistical summaries (min, max, sum, value_count and avg)
+ for each metric field grouped by a configured time interval and their dimensions.
+ - When running in ``last_value`` mode, it keeps the last value for each metric in the configured interval and their dimensions.
+
+ For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
All documents within an hour interval are summarized and stored as a single document in the downsample index.
NOTE: Only indices in a time series data stream are supported.
Neither field nor document level security can be defined on the source index.
- The source index must be read only (index.blocks.write: true).
+ The source index must be read-only (index.blocks.write: true).
``_
@@ -2355,6 +2414,53 @@ def get_alias(
path_parts=__path_parts,
)
+ @_rewrite_parameters()
+ @_stability_warning(Stability.EXPERIMENTAL)
+ def get_all_sample_configuration(
+ self,
+ *,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ pretty: t.Optional[bool] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Get all sampling configurations.
+ Get the sampling configurations for all indices.
+
+
+ ``_
+
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ """
+ __path_parts: t.Dict[str, str] = {}
+ __path = "/_sample/config"
+ __query: t.Dict[str, t.Any] = {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ __headers = {"accept": "application/json"}
+ return self.perform_request( # type: ignore[return-value]
+ "GET",
+ __path,
+ params=__query,
+ headers=__headers,
+ endpoint_id="indices.get_all_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters()
def get_data_lifecycle(
self,
@@ -2815,8 +2921,8 @@ def get_index_template(
``_
- :param name: Comma-separated list of index template names used to limit the request.
- Wildcard (*) expressions are supported.
+ :param name: Name of index template to retrieve. Wildcard (*) expressions are
+ supported.
:param flat_settings: If true, returns settings in flat format.
:param include_defaults: If true, returns all relevant default configurations
for the index template.
@@ -2947,7 +3053,6 @@ def get_mapping(
)
@_rewrite_parameters()
- @_stability_warning(Stability.EXPERIMENTAL)
def get_migrate_reindex_status(
self,
*,
@@ -3035,6 +3140,57 @@ def get_sample(
path_parts=__path_parts,
)
+ @_rewrite_parameters()
+ @_stability_warning(Stability.EXPERIMENTAL)
+ def get_sample_configuration(
+ self,
+ *,
+ index: str,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ pretty: t.Optional[bool] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Get sampling configuration.
+ Get the sampling configuration for the specified index.
+
+
+ ``_
+
+ :param index: The name of the index.
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'index'")
+ __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+ __path = f'/{__path_parts["index"]}/_sample/config'
+ __query: t.Dict[str, t.Any] = {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ __headers = {"accept": "application/json"}
+ return self.perform_request( # type: ignore[return-value]
+ "GET",
+ __path,
+ params=__query,
+ headers=__headers,
+ endpoint_id="indices.get_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters()
@_stability_warning(Stability.EXPERIMENTAL)
def get_sample_stats(
@@ -3251,7 +3407,6 @@ def get_template(
@_rewrite_parameters(
body_name="reindex",
)
- @_stability_warning(Stability.EXPERIMENTAL)
def migrate_reindex(
self,
*,
@@ -3701,7 +3856,7 @@ def put_data_lifecycle(
*,
name: t.Union[str, t.Sequence[str]],
data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
- downsampling: t.Optional[t.Mapping[str, t.Any]] = None,
+ downsampling: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None,
enabled: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
expand_wildcards: t.Optional[
@@ -4357,6 +4512,95 @@ def put_mapping(
path_parts=__path_parts,
)
+ @_rewrite_parameters(
+ body_fields=("rate", "if_", "max_samples", "max_size", "time_to_live"),
+ parameter_aliases={"if": "if_"},
+ )
+ @_stability_warning(Stability.EXPERIMENTAL)
+ def put_sample_configuration(
+ self,
+ *,
+ index: str,
+ rate: t.Optional[t.Union[str, t.Any]] = None,
+ error_trace: t.Optional[bool] = None,
+ filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ human: t.Optional[bool] = None,
+ if_: t.Optional[str] = None,
+ master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ max_samples: t.Optional[int] = None,
+ max_size: t.Optional[t.Union[int, str]] = None,
+ pretty: t.Optional[bool] = None,
+ time_to_live: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+ body: t.Optional[t.Dict[str, t.Any]] = None,
+ ) -> ObjectApiResponse[t.Any]:
+ """
+ .. raw:: html
+
+ Create or update sampling configuration.
+ Create or update the sampling configuration for the specified index.
+
+
+ ``_
+
+ :param index: The name of the index or data stream.
+ :param rate: The fraction of documents to sample. Must be greater than 0 and
+ less than or equal to 1. Can be specified as a number or a string.
+ :param if_: An optional condition script that sampled documents must satisfy.
+ :param master_timeout: Period to wait for a connection to the master node. If
+ no response is received before the timeout expires, the request fails and
+ returns an error.
+ :param max_samples: The maximum number of documents to sample. Must be greater
+ than 0 and less than or equal to 10,000.
+ :param max_size: The maximum total size of sampled documents. Must be greater
+ than 0 and less than or equal to 5GB.
+ :param time_to_live: The duration for which the sampled documents should be retained.
+ Must be greater than 0 and less than or equal to 30 days.
+ :param timeout: Period to wait for a response. If no response is received before
+ the timeout expires, the request fails and returns an error.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for parameter 'index'")
+ if rate is None and body is None:
+ raise ValueError("Empty value passed for parameter 'rate'")
+ __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+ __path = f'/{__path_parts["index"]}/_sample/config'
+ __query: t.Dict[str, t.Any] = {}
+ __body: t.Dict[str, t.Any] = body if body is not None else {}
+ if error_trace is not None:
+ __query["error_trace"] = error_trace
+ if filter_path is not None:
+ __query["filter_path"] = filter_path
+ if human is not None:
+ __query["human"] = human
+ if master_timeout is not None:
+ __query["master_timeout"] = master_timeout
+ if pretty is not None:
+ __query["pretty"] = pretty
+ if timeout is not None:
+ __query["timeout"] = timeout
+ if not __body:
+ if rate is not None:
+ __body["rate"] = rate
+ if if_ is not None:
+ __body["if"] = if_
+ if max_samples is not None:
+ __body["max_samples"] = max_samples
+ if max_size is not None:
+ __body["max_size"] = max_size
+ if time_to_live is not None:
+ __body["time_to_live"] = time_to_live
+ __headers = {"accept": "application/json", "content-type": "application/json"}
+ return self.perform_request( # type: ignore[return-value]
+ "PUT",
+ __path,
+ params=__query,
+ headers=__headers,
+ body=__body,
+ endpoint_id="indices.put_sample_configuration",
+ path_parts=__path_parts,
+ )
+
@_rewrite_parameters(
body_name="settings",
)
@@ -4893,7 +5137,7 @@ def reload_search_analyzers(
def remove_block(
self,
*,
- index: str,
+ index: t.Union[str, t.Sequence[str]],
block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]],
allow_no_indices: t.Optional[bool] = None,
error_trace: t.Optional[bool] = None,
@@ -5994,7 +6238,66 @@ def stats(
self,
*,
index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ],
+ ]
+ ] = None,
completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
error_trace: t.Optional[bool] = None,
expand_wildcards: t.Optional[
diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py
index 8f02e9bb2..0c39794dc 100644
--- a/elasticsearch/_sync/client/inference.py
+++ b/elasticsearch/_sync/client/inference.py
@@ -55,7 +55,9 @@ def completion(
:param inference_id: The inference Id
:param input: Inference input. Either a string or an array of strings.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+        settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
:param timeout: Specifies the amount of time to wait for the inference request
to complete.
"""
@@ -120,15 +122,17 @@ def delete(
"""
.. raw:: html
- Delete an inference endpoint
+ Delete an inference endpoint
+ This API requires the manage_inference cluster privilege (the built-in inference_admin role grants this privilege).
``_
:param inference_id: The inference identifier.
:param task_type: The task type
- :param dry_run: When true, the endpoint is not deleted and a list of ingest processors
- which reference this endpoint is returned.
+ :param dry_run: When true, checks the semantic_text fields and inference processors
+ that reference the endpoint and returns them in a list, but does not delete
+ the endpoint.
:param force: When true, the inference endpoint is forcefully deleted even if
it is still being used by ingest processors or semantic text fields.
"""
@@ -194,7 +198,8 @@ def get(
"""
.. raw:: html
- Get an inference endpoint
+ Get an inference endpoint
+ This API requires the monitor_inference cluster privilege (the built-in inference_admin and inference_user roles grant this privilege).
``_
@@ -548,7 +553,7 @@ def put_alibabacloud(
self,
*,
task_type: t.Union[
- str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"]
+ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
],
alibabacloud_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None,
@@ -577,7 +582,9 @@ def put_alibabacloud(
this case, `alibabacloud-ai-search`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `alibabacloud-ai-search` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank` or `completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -673,7 +680,8 @@ def put_amazonbedrock(
this case, `amazonbedrock`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `amazonbedrock` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -775,7 +783,9 @@ def put_amazonsagemaker(
:param service_settings: Settings used to install the inference model. These
settings are specific to the `amazon_sagemaker` service and `service_settings.api`
you specified.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank`, `completion`, or `chat_completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type and `service_settings.api` you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -829,12 +839,7 @@ def put_amazonsagemaker(
)
@_rewrite_parameters(
- body_fields=(
- "service",
- "service_settings",
- "chunking_settings",
- "task_settings",
- ),
+ body_fields=("service", "service_settings", "task_settings"),
)
def put_anthropic(
self,
@@ -843,7 +848,6 @@ def put_anthropic(
anthropic_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -867,8 +871,7 @@ def put_anthropic(
:param service: The type of service supported for the specified task type. In
this case, `anthropic`.
:param service_settings: Settings used to install the inference model. These
- settings are specific to the `watsonxai` service.
- :param chunking_settings: The chunking configuration object.
+ settings are specific to the `anthropic` service.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -906,8 +909,6 @@ def put_anthropic(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
if task_settings is not None:
__body["task_settings"] = task_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
@@ -959,8 +960,10 @@ def put_azureaistudio(
:param service: The type of service supported for the specified task type. In
this case, `azureaistudio`.
:param service_settings: Settings used to install the inference model. These
- settings are specific to the `openai` service.
- :param chunking_settings: The chunking configuration object.
+ settings are specific to the `azureaistudio` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` or `completion`
+ task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1060,7 +1063,8 @@ def put_azureopenai(
this case, `azureopenai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `azureopenai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1152,7 +1156,9 @@ def put_cohere(
this case, `cohere`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `cohere` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` or `completion`
+        task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1204,12 +1210,7 @@ def put_cohere(
)
@_rewrite_parameters(
- body_fields=(
- "service",
- "service_settings",
- "chunking_settings",
- "task_settings",
- ),
+ body_fields=("service", "service_settings", "task_settings"),
)
def put_contextualai(
self,
@@ -1218,7 +1219,6 @@ def put_contextualai(
contextualai_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["contextualai"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -1243,7 +1243,6 @@ def put_contextualai(
this case, `contextualai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `contextualai` service.
- :param chunking_settings: The chunking configuration object.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1281,8 +1280,6 @@ def put_contextualai(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
if task_settings is not None:
__body["task_settings"] = task_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
@@ -1376,7 +1373,9 @@ def put_custom(
this case, `custom`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `custom` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `sparse_embedding` or `text_embedding` task types. Not applicable to
+ the `rerank` or `completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
"""
@@ -1424,7 +1423,7 @@ def put_custom(
)
@_rewrite_parameters(
- body_fields=("service", "service_settings", "chunking_settings"),
+ body_fields=("service", "service_settings"),
)
def put_deepseek(
self,
@@ -1433,7 +1432,6 @@ def put_deepseek(
deepseek_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
- chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -1456,7 +1454,6 @@ def put_deepseek(
this case, `deepseek`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `deepseek` service.
- :param chunking_settings: The chunking configuration object.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -1490,8 +1487,6 @@ def put_deepseek(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
- if chunking_settings is not None:
- __body["chunking_settings"] = chunking_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
return self.perform_request( # type: ignore[return-value]
"PUT",
@@ -1560,7 +1555,7 @@ def put_elasticsearch(
settings are specific to the `elasticsearch` service.
:param chunking_settings: The chunking configuration object. Applies only to
the `sparse_embedding` and `text_embedding` task types. Not applicable to
- the `rerank`, `completion`, or `chat_completion` task types.
+ the `rerank` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1741,7 +1736,8 @@ def put_googleaistudio(
this case, `googleaistudio`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `googleaistudio` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` task type.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -1831,7 +1827,9 @@ def put_googlevertexai(
this case, `googlevertexai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `googlevertexai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -1959,7 +1957,9 @@ def put_hugging_face(
this case, `hugging_face`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `hugging_face` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank`, `completion`,
+ or `chat_completion` task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2053,7 +2053,8 @@ def put_jinaai(
this case, `jinaai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `jinaai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2139,7 +2140,9 @@ def put_llama(
this case, `llama`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `llama` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -2221,7 +2224,9 @@ def put_mistral(
this case, `mistral`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `mistral` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -2311,7 +2316,9 @@ def put_openai(
this case, `openai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `openai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2402,7 +2409,8 @@ def put_voyageai(
this case, `voyageai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `voyageai` service.
- :param chunking_settings: The chunking configuration object.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `rerank` task type.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
@@ -2454,7 +2462,7 @@ def put_voyageai(
)
@_rewrite_parameters(
- body_fields=("service", "service_settings"),
+ body_fields=("service", "service_settings", "chunking_settings"),
)
def put_watsonx(
self,
@@ -2465,6 +2473,7 @@ def put_watsonx(
watsonx_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+ chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -2489,6 +2498,9 @@ def put_watsonx(
this case, `watsonxai`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `watsonxai` service.
+ :param chunking_settings: The chunking configuration object. Applies only to
+ the `text_embedding` task type. Not applicable to the `completion` or `chat_completion`
+ task types.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
@@ -2522,6 +2534,8 @@ def put_watsonx(
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
+ if chunking_settings is not None:
+ __body["chunking_settings"] = chunking_settings
__headers = {"accept": "application/json", "content-type": "application/json"}
return self.perform_request( # type: ignore[return-value]
"PUT",
@@ -2638,7 +2652,9 @@ def sparse_embedding(
:param inference_id: The inference Id
:param input: Inference input. Either a string or an array of strings.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+        settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
:param timeout: Specifies the amount of time to wait for the inference request
to complete.
"""
@@ -2710,7 +2726,9 @@ def text_embedding(
to the relevant service-specific documentation for more info. > info > The
`input_type` parameter specified on the root level of the request body will
take precedence over the `input_type` parameter specified in `task_settings`.
- :param task_settings: Optional task settings
+ :param task_settings: Task settings for the individual inference request. These
+        settings are specific to the task type you specified and override the task
+ settings specified when initializing the service.
:param timeout: Specifies the amount of time to wait for the inference request
to complete.
"""
diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py
index 9cf656d16..12b311e46 100644
--- a/elasticsearch/_sync/client/nodes.py
+++ b/elasticsearch/_sync/client/nodes.py
@@ -220,7 +220,50 @@ def info(
self,
*,
node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "aggregations",
+ "http",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "plugins",
+ "process",
+ "remote_cluster_server",
+ "settings",
+ "thread_pool",
+ "transport",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "aggregations",
+ "http",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "plugins",
+ "process",
+ "remote_cluster_server",
+ "settings",
+ "thread_pool",
+ "transport",
+ ],
+ ],
+ ]
+ ] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
flat_settings: t.Optional[bool] = None,
@@ -357,8 +400,120 @@ def stats(
self,
*,
node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- index_metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "adaptive_selection",
+ "allocations",
+ "breaker",
+ "discovery",
+ "fs",
+ "http",
+ "indexing_pressure",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "process",
+ "repositories",
+ "script",
+ "script_cache",
+ "thread_pool",
+ "transport",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "_none",
+ "adaptive_selection",
+ "allocations",
+ "breaker",
+ "discovery",
+ "fs",
+ "http",
+ "indexing_pressure",
+ "indices",
+ "ingest",
+ "jvm",
+ "os",
+ "process",
+ "repositories",
+ "script",
+ "script_cache",
+ "thread_pool",
+ "transport",
+ ],
+ ],
+ ]
+ ] = None,
+ index_metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "_all",
+ "bulk",
+ "completion",
+ "dense_vector",
+ "docs",
+ "fielddata",
+ "flush",
+ "get",
+ "indexing",
+ "mappings",
+ "merge",
+ "query_cache",
+ "recovery",
+ "refresh",
+ "request_cache",
+ "search",
+ "segments",
+ "shard_stats",
+ "sparse_vector",
+ "store",
+ "translog",
+ "warmer",
+ ],
+ ],
+ ]
+ ] = None,
completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
error_trace: t.Optional[bool] = None,
fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -483,7 +638,14 @@ def usage(
self,
*,
node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- metric: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ metric: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[str, t.Literal["_all", "aggregations", "rest_actions"]]
+ ],
+ t.Union[str, t.Literal["_all", "aggregations", "rest_actions"]],
+ ]
+ ] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -502,7 +664,7 @@ def usage(
information; use `_local` to return information from the node you're connecting
to, leave empty to get information from all nodes
:param metric: Limits the information returned to the specific metrics. A comma-separated
- list of the following options: `_all`, `rest_actions`.
+ list of the following options: `_all`, `rest_actions`, `aggregations`.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py
index adf60bc5d..03fa1661f 100644
--- a/elasticsearch/_sync/client/security.py
+++ b/elasticsearch/_sync/client/security.py
@@ -477,7 +477,7 @@ def clear_api_key_cache(
def clear_cached_privileges(
self,
*,
- application: str,
+ application: t.Union[str, t.Sequence[str]],
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
@@ -3763,7 +3763,8 @@ def query_role(
:param size: The number of hits to return. It must not be negative. By default,
you cannot page through more than 10,000 hits using the `from` and `size`
parameters. To page through more hits, use the `search_after` parameter.
- :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`.
+ :param sort: The sort definition. You can sort on `name`, `description`, `metadata`,
+ `applications.application`, `applications.privileges`, and `applications.resources`.
In addition, sort can also be applied to the `_doc` field to sort by index
order.
"""
diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py
index 00b7d8e38..c00805cbf 100644
--- a/elasticsearch/_sync/client/snapshot.py
+++ b/elasticsearch/_sync/client/snapshot.py
@@ -397,7 +397,7 @@ def delete(
self,
*,
repository: str,
- snapshot: str,
+ snapshot: t.Union[str, t.Sequence[str]],
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py
index fa3218f81..5b1128f6a 100644
--- a/elasticsearch/_sync/client/text_structure.py
+++ b/elasticsearch/_sync/client/text_structure.py
@@ -31,7 +31,7 @@ def find_field_structure(
*,
field: str,
index: str,
- column_names: t.Optional[str] = None,
+ column_names: t.Optional[t.Union[str, t.Sequence[str]]] = None,
delimiter: t.Optional[str] = None,
documents_to_sample: t.Optional[int] = None,
ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None,
@@ -217,7 +217,7 @@ def find_message_structure(
self,
*,
messages: t.Optional[t.Sequence[str]] = None,
- column_names: t.Optional[str] = None,
+ column_names: t.Optional[t.Union[str, t.Sequence[str]]] = None,
delimiter: t.Optional[str] = None,
ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None,
error_trace: t.Optional[bool] = None,
diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py
index 71cfe65e0..a61932bf9 100644
--- a/elasticsearch/_version.py
+++ b/elasticsearch/_version.py
@@ -16,4 +16,4 @@
# under the License.
__versionstr__ = "9.2.0"
-__es_specification_commit__ = "7868bd1bdf62b05aabe90d705168f7537edc184e"
+__es_specification_commit__ = "29fdfe0c2e245430f465ab951f21da84aa68602b"
diff --git a/elasticsearch/dsl/aggs.py b/elasticsearch/dsl/aggs.py
index 2a6b2ff91..365bf12d4 100644
--- a/elasticsearch/dsl/aggs.py
+++ b/elasticsearch/dsl/aggs.py
@@ -1495,7 +1495,7 @@ def __init__(
"DefaultType",
] = DEFAULT,
field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
- precision: Union[float, str, "DefaultType"] = DEFAULT,
+ precision: Union[int, str, "DefaultType"] = DEFAULT,
shard_size: Union[int, "DefaultType"] = DEFAULT,
size: Union[int, "DefaultType"] = DEFAULT,
**kwargs: Any,
@@ -1516,9 +1516,9 @@ class GeoLine(Agg[_R]):
ordered by the chosen sort field.
:arg point: (required) The name of the geo_point field.
- :arg sort: (required) The name of the numeric field to use as the sort
- key for ordering the points. When the `geo_line` aggregation is
- nested inside a `time_series` aggregation, this field defaults to
+ :arg sort: The name of the numeric field to use as the sort key for
+ ordering the points. When the `geo_line` aggregation is nested
+ inside a `time_series` aggregation, this field defaults to
`@timestamp`, and any other value will result in error.
:arg include_sort: When `true`, returns an additional array of the
sort values in the feature properties.
@@ -1579,7 +1579,7 @@ def __init__(
self,
*,
field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
- precision: Union[float, "DefaultType"] = DEFAULT,
+ precision: Union[int, "DefaultType"] = DEFAULT,
shard_size: Union[int, "DefaultType"] = DEFAULT,
size: Union[int, "DefaultType"] = DEFAULT,
bounds: Union[
@@ -1855,9 +1855,9 @@ def __init__(
class Line(Agg[_R]):
"""
:arg point: (required) The name of the geo_point field.
- :arg sort: (required) The name of the numeric field to use as the sort
- key for ordering the points. When the `geo_line` aggregation is
- nested inside a `time_series` aggregation, this field defaults to
+ :arg sort: The name of the numeric field to use as the sort key for
+ ordering the points. When the `geo_line` aggregation is nested
+ inside a `time_series` aggregation, this field defaults to
`@timestamp`, and any other value will result in error.
:arg include_sort: When `true`, returns an additional array of the
sort values in the feature properties.
@@ -2680,7 +2680,7 @@ def __init__(
self,
*,
keyed: Union[bool, "DefaultType"] = DEFAULT,
- percents: Union[Sequence[float], "DefaultType"] = DEFAULT,
+ percents: Union[float, Sequence[float], "DefaultType"] = DEFAULT,
hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT,
tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT,
format: Union[str, "DefaultType"] = DEFAULT,