diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java index 846fe0fa3..9378bda9d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java @@ -1315,10 +1315,10 @@ public final CompletableFuture index( // ----- Endpoint: info /** - * Get cluster info. Returns basic information about the cluster. + * Get cluster info. Get basic build, version, and cluster information. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rest-api-root.html">Documentation * on elastic.co */ public CompletableFuture info() { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java index cedfeec43..1fc694b1f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java @@ -1334,10 +1334,10 @@ public final IndexResponse index( // ----- Endpoint: info /** - * Get cluster info. Returns basic information about the cluster. + * Get cluster info. Get basic build, version, and cluster information. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rest-api-root.html">Documentation * on elastic.co */ public InfoResponse info() throws IOException, ElasticsearchException { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ElasticsearchVersionInfo.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ElasticsearchVersionInfo.java index ac2c58313..9f4c32377 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ElasticsearchVersionInfo.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/ElasticsearchVersionInfo.java @@ -102,63 +102,86 @@ public static ElasticsearchVersionInfo of(Function + * API name: {@code build_date} */ public final DateTime buildDate() { return this.buildDate; } /** - * Required - API name: {@code build_flavor} + * Required - The build flavor. For example, default. + *

+ * API name: {@code build_flavor} */ public final String buildFlavor() { return this.buildFlavor; } /** - * Required - API name: {@code build_hash} + * Required - The Elasticsearch Git commit's SHA hash. + *

+ * API name: {@code build_hash} */ public final String buildHash() { return this.buildHash; } /** - * Required - API name: {@code build_snapshot} + * Required - Indicates whether the Elasticsearch build was a snapshot. + *

+ * API name: {@code build_snapshot} */ public final boolean buildSnapshot() { return this.buildSnapshot; } /** - * Required - API name: {@code build_type} + * Required - The build type that corresponds to how Elasticsearch was + * installed. For example, docker, rpm, or + * tar. + *

+ * API name: {@code build_type} */ public final String buildType() { return this.buildType; } /** - * Required - API name: {@code lucene_version} + * Required - The version number of Elasticsearch's underlying Lucene software. + *

+ * API name: {@code lucene_version} */ public final String luceneVersion() { return this.luceneVersion; } /** - * Required - API name: {@code minimum_index_compatibility_version} + * Required - The minimum index version with which the responding node can read + * from disk. + *

+ * API name: {@code minimum_index_compatibility_version} */ public final String minimumIndexCompatibilityVersion() { return this.minimumIndexCompatibilityVersion; } /** - * Required - API name: {@code minimum_wire_compatibility_version} + * Required - The minimum node version with which the responding node can + * communicate. Also the minimum version from which you can perform a rolling + * upgrade. + *

+ * API name: {@code minimum_wire_compatibility_version} */ public final String minimumWireCompatibilityVersion() { return this.minimumWireCompatibilityVersion; } /** - * Required - API name: {@code number} + * Required - The Elasticsearch version number. + *

+ * API name: {@code number} */ public final String number() { return this.number; @@ -236,7 +259,9 @@ public static class Builder extends WithJsonObjectBuilderBase private String number; /** - * Required - API name: {@code build_date} + * Required - The Elasticsearch Git commit's date. + *

+ * API name: {@code build_date} */ public final Builder buildDate(DateTime value) { this.buildDate = value; @@ -244,7 +269,9 @@ public final Builder buildDate(DateTime value) { } /** - * Required - API name: {@code build_flavor} + * Required - The build flavor. For example, default. + *

+ * API name: {@code build_flavor} */ public final Builder buildFlavor(String value) { this.buildFlavor = value; @@ -252,7 +279,9 @@ public final Builder buildFlavor(String value) { } /** - * Required - API name: {@code build_hash} + * Required - The Elasticsearch Git commit's SHA hash. + *

+ * API name: {@code build_hash} */ public final Builder buildHash(String value) { this.buildHash = value; @@ -260,7 +289,9 @@ public final Builder buildHash(String value) { } /** - * Required - API name: {@code build_snapshot} + * Required - Indicates whether the Elasticsearch build was a snapshot. + *

+ * API name: {@code build_snapshot} */ public final Builder buildSnapshot(boolean value) { this.buildSnapshot = value; @@ -268,7 +299,11 @@ public final Builder buildSnapshot(boolean value) { } /** - * Required - API name: {@code build_type} + * Required - The build type that corresponds to how Elasticsearch was + * installed. For example, docker, rpm, or + * tar. + *

+ * API name: {@code build_type} */ public final Builder buildType(String value) { this.buildType = value; @@ -276,7 +311,9 @@ public final Builder buildType(String value) { } /** - * Required - API name: {@code lucene_version} + * Required - The version number of Elasticsearch's underlying Lucene software. + *

+ * API name: {@code lucene_version} */ public final Builder luceneVersion(String value) { this.luceneVersion = value; @@ -284,7 +321,10 @@ public final Builder luceneVersion(String value) { } /** - * Required - API name: {@code minimum_index_compatibility_version} + * Required - The minimum index version with which the responding node can read + * from disk. + *

+ * API name: {@code minimum_index_compatibility_version} */ public final Builder minimumIndexCompatibilityVersion(String value) { this.minimumIndexCompatibilityVersion = value; @@ -292,7 +332,11 @@ public final Builder minimumIndexCompatibilityVersion(String value) { } /** - * Required - API name: {@code minimum_wire_compatibility_version} + * Required - The minimum node version with which the responding node can + * communicate. Also the minimum version from which you can perform a rolling + * upgrade. + *

+ * API name: {@code minimum_wire_compatibility_version} */ public final Builder minimumWireCompatibilityVersion(String value) { this.minimumWireCompatibilityVersion = value; @@ -300,7 +344,9 @@ public final Builder minimumWireCompatibilityVersion(String value) { } /** - * Required - API name: {@code number} + * Required - The Elasticsearch version number. + *

+ * API name: {@code number} */ public final Builder number(String value) { this.number = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteComponentTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteComponentTemplateRequest.java index f4a533d34..095f694f7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteComponentTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteComponentTemplateRequest.java @@ -58,9 +58,9 @@ // typedef: cluster.delete_component_template.Request /** - * Delete component templates. Deletes component templates. Component templates - * are building blocks for constructing index templates that specify index - * mappings, settings, and aliases. + * Delete component templates. Component templates are building blocks for + * constructing index templates that specify index mappings, settings, and + * aliases. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java index eaf4b2fac..7d36d3d2d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java @@ -135,9 +135,9 @@ public CompletableFuture allocationExplain() { // ----- Endpoint: cluster.delete_component_template /** - * Delete component templates. Deletes component templates. Component templates - * are building blocks for constructing index templates that specify index - * mappings, settings, and aliases. + * Delete component templates. Component templates are building blocks for + * constructing index templates that specify index mappings, settings, and + * aliases. 
* * @see Documentation @@ -153,9 +153,9 @@ public CompletableFuture deleteComponentTemplat } /** - * Delete component templates. Deletes component templates. Component templates - * are building blocks for constructing index templates that specify index - * mappings, settings, and aliases. + * Delete component templates. Component templates are building blocks for + * constructing index templates that specify index mappings, settings, and + * aliases. * * @param fn * a function that initializes a builder to create the @@ -258,7 +258,7 @@ public final CompletableFuture existsComponentTemplate( // ----- Endpoint: cluster.get_component_template /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * * @see Documentation @@ -273,7 +273,7 @@ public CompletableFuture getComponentTemplate(GetC } /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * * @param fn * a function that initializes a builder to create the @@ -289,7 +289,7 @@ public final CompletableFuture getComponentTemplat } /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * * @see Documentation @@ -692,9 +692,9 @@ public CompletableFuture postVotingConfigExclusions() { // ----- Endpoint: cluster.put_component_template /** - * Create or update a component template. Creates or updates a component - * template. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Create or update a component template. Component templates are building + * blocks for constructing index templates that specify index mappings, + * settings, and aliases. *

* An index template can be composed of multiple component templates. To use a * component template, specify it in an index template’s @@ -713,6 +713,12 @@ public CompletableFuture postVotingConfigExclusions() { * You can use C-style /* *\/ block comments in component * templates. You can include comments anywhere in the request body except * before the opening curly bracket. + *

+ * Applying component templates + *

+ * You cannot directly apply a component template to a data stream or index. To + * be applied, a component template must be included in an index template's + * composed_of list. * * @see Documentation @@ -727,9 +733,9 @@ public CompletableFuture putComponentTemplate(PutC } /** - * Create or update a component template. Creates or updates a component - * template. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Create or update a component template. Component templates are building + * blocks for constructing index templates that specify index mappings, + * settings, and aliases. *

* An index template can be composed of multiple component templates. To use a * component template, specify it in an index template’s @@ -748,6 +754,12 @@ public CompletableFuture putComponentTemplate(PutC * You can use C-style /* *\/ block comments in component * templates. You can include comments anywhere in the request body except * before the opening curly bracket. + *

+ * Applying component templates + *

+ * You cannot directly apply a component template to a data stream or index. To + * be applied, a component template must be included in an index template's + * composed_of list. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java index d980e4a55..bb1a5e092 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java @@ -135,9 +135,9 @@ public AllocationExplainResponse allocationExplain() throws IOException, Elastic // ----- Endpoint: cluster.delete_component_template /** - * Delete component templates. Deletes component templates. Component templates - * are building blocks for constructing index templates that specify index - * mappings, settings, and aliases. + * Delete component templates. Component templates are building blocks for + * constructing index templates that specify index mappings, settings, and + * aliases. * * @see Documentation @@ -153,9 +153,9 @@ public DeleteComponentTemplateResponse deleteComponentTemplate(DeleteComponentTe } /** - * Delete component templates. Deletes component templates. Component templates - * are building blocks for constructing index templates that specify index - * mappings, settings, and aliases. + * Delete component templates. Component templates are building blocks for + * constructing index templates that specify index mappings, settings, and + * aliases. * * @param fn * a function that initializes a builder to create the @@ -262,7 +262,7 @@ public final BooleanResponse existsComponentTemplate( // ----- Endpoint: cluster.get_component_template /** - * Get component templates. Retrieves information about component templates. 
+ * Get component templates. Get information about component templates. * * @see Documentation @@ -278,7 +278,7 @@ public GetComponentTemplateResponse getComponentTemplate(GetComponentTemplateReq } /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * * @param fn * a function that initializes a builder to create the @@ -295,7 +295,7 @@ public final GetComponentTemplateResponse getComponentTemplate( } /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * * @see Documentation @@ -703,9 +703,9 @@ public BooleanResponse postVotingConfigExclusions() throws IOException, Elastics // ----- Endpoint: cluster.put_component_template /** - * Create or update a component template. Creates or updates a component - * template. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Create or update a component template. Component templates are building + * blocks for constructing index templates that specify index mappings, + * settings, and aliases. *

* An index template can be composed of multiple component templates. To use a * component template, specify it in an index template’s @@ -724,6 +724,12 @@ public BooleanResponse postVotingConfigExclusions() throws IOException, Elastics * You can use C-style /* *\/ block comments in component * templates. You can include comments anywhere in the request body except * before the opening curly bracket. + *

+ * Applying component templates + *

+ * You cannot directly apply a component template to a data stream or index. To + * be applied, a component template must be included in an index template's + * composed_of list. * * @see Documentation @@ -739,9 +745,9 @@ public PutComponentTemplateResponse putComponentTemplate(PutComponentTemplateReq } /** - * Create or update a component template. Creates or updates a component - * template. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Create or update a component template. Component templates are building + * blocks for constructing index templates that specify index mappings, + * settings, and aliases. *

* An index template can be composed of multiple component templates. To use a * component template, specify it in an index template’s @@ -760,6 +766,12 @@ public PutComponentTemplateResponse putComponentTemplate(PutComponentTemplateReq * You can use C-style /* *\/ block comments in component * templates. You can include comments anywhere in the request body except * before the opening curly bracket. + *

+ * Applying component templates + *

+ * You cannot directly apply a component template to a data stream or index. To + * be applied, a component template must be included in an index template's + * composed_of list. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetComponentTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetComponentTemplateRequest.java index e4079755c..b1bc90038 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetComponentTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetComponentTemplateRequest.java @@ -56,7 +56,7 @@ // typedef: cluster.get_component_template.Request /** - * Get component templates. Retrieves information about component templates. + * Get component templates. Get information about component templates. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutComponentTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutComponentTemplateRequest.java index 82983238e..4a3806053 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutComponentTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutComponentTemplateRequest.java @@ -62,9 +62,9 @@ // typedef: cluster.put_component_template.Request /** - * Create or update a component template. Creates or updates a component - * template. Component templates are building blocks for constructing index - * templates that specify index mappings, settings, and aliases. + * Create or update a component template. Component templates are building + * blocks for constructing index templates that specify index mappings, + * settings, and aliases. *

* An index template can be composed of multiple component templates. To use a * component template, specify it in an index template’s @@ -83,6 +83,12 @@ * You can use C-style /* *\/ block comments in component * templates. You can include comments anywhere in the request body except * before the opening curly bracket. + *

+ * Applying component templates + *

+ * You cannot directly apply a component template to a data stream or index. To + * be applied, a component template must be included in an index template's + * composed_of list. * * @see API @@ -127,10 +133,11 @@ public static PutComponentTemplateRequest of(Function_meta, replace the template without specifying this information. + * Optional user metadata about the component template. It may have any + * contents. This map is not automatically generated by Elasticsearch. This + * information is stored in the cluster state, so keeping it short is + * preferable. To unset _meta, replace the template without + * specifying this information. *

* API name: {@code _meta} */ @@ -280,10 +287,11 @@ public static class Builder extends RequestBase.AbstractBuilder private Long version; /** - * Optional user metadata about the component template. May have any contents. - * This map is not automatically generated by Elasticsearch. This information is - * stored in the cluster state, so keeping it short is preferable. To unset - * _meta, replace the template without specifying this information. + * Optional user metadata about the component template. It may have any + * contents. This map is not automatically generated by Elasticsearch. This + * information is stored in the cluster state, so keeping it short is + * preferable. To unset _meta, replace the template without + * specifying this information. *

* API name: {@code _meta} *

@@ -295,10 +303,11 @@ public final Builder meta(Map map) { } /** - * Optional user metadata about the component template. May have any contents. - * This map is not automatically generated by Elasticsearch. This information is - * stored in the cluster state, so keeping it short is preferable. To unset - * _meta, replace the template without specifying this information. + * Optional user metadata about the component template. It may have any + * contents. This map is not automatically generated by Elasticsearch. This + * information is stored in the cluster state, so keeping it short is + * preferable. To unset _meta, replace the template without + * specifying this information. *

* API name: {@code _meta} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ConnectorFeatures.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ConnectorFeatures.java index 04e6f8ca4..82046c5fe 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ConnectorFeatures.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ConnectorFeatures.java @@ -86,6 +86,8 @@ public static ConnectorFeatures of(Function * API name: {@code document_level_security} */ @Nullable @@ -94,6 +96,8 @@ public final FeatureEnabled documentLevelSecurity() { } /** + * Indicates whether incremental syncs are enabled. + *

* API name: {@code incremental_sync} */ @Nullable @@ -102,6 +106,8 @@ public final FeatureEnabled incrementalSync() { } /** + * Indicates whether managed connector API keys are enabled. + *

* API name: {@code native_connector_api_keys} */ @Nullable @@ -176,6 +182,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private SyncRulesFeature syncRules; /** + * Indicates whether document-level security is enabled. + *

* API name: {@code document_level_security} */ public final Builder documentLevelSecurity(@Nullable FeatureEnabled value) { @@ -184,6 +192,8 @@ public final Builder documentLevelSecurity(@Nullable FeatureEnabled value) { } /** + * Indicates whether document-level security is enabled. + *

* API name: {@code document_level_security} */ public final Builder documentLevelSecurity(Function> fn) { @@ -191,6 +201,8 @@ public final Builder documentLevelSecurity(Function * API name: {@code incremental_sync} */ public final Builder incrementalSync(@Nullable FeatureEnabled value) { @@ -199,6 +211,8 @@ public final Builder incrementalSync(@Nullable FeatureEnabled value) { } /** + * Indicates whether incremental syncs are enabled. + *

* API name: {@code incremental_sync} */ public final Builder incrementalSync(Function> fn) { @@ -206,6 +220,8 @@ public final Builder incrementalSync(Function * API name: {@code native_connector_api_keys} */ public final Builder nativeConnectorApiKeys(@Nullable FeatureEnabled value) { @@ -214,6 +230,8 @@ public final Builder nativeConnectorApiKeys(@Nullable FeatureEnabled value) { } /** + * Indicates whether managed connector API keys are enabled. + *

* API name: {@code native_connector_api_keys} */ public final Builder nativeConnectorApiKeys( diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java index 290030bf1..ba6d9d8fd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorAsyncClient.java @@ -388,6 +388,106 @@ public final CompletableFuture syncJobCancel( return syncJobCancel(fn.apply(new SyncJobCancelRequest.Builder()).build()); } + // ----- Endpoint: connector.sync_job_check_in + + /** + * Check in a connector sync job. Check in a connector sync job and set the + * last_seen field to the current time before updating it in the + * internal index. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture syncJobCheckIn(SyncJobCheckInRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobCheckInRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Check in a connector sync job. Check in a connector sync job and set the + * last_seen field to the current time before updating it in the + * internal index. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobCheckInRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture syncJobCheckIn( + Function> fn) { + return syncJobCheckIn(fn.apply(new SyncJobCheckInRequest.Builder()).build()); + } + + // ----- Endpoint: connector.sync_job_claim + + /** + * Claim a connector sync job. This action updates the job status to + * in_progress and sets the last_seen and + * started_at timestamps to the current time. Additionally, it can + * set the sync_cursor property for the sync job. + *

+ * This API is not intended for direct connector management by users. It + * supports the implementation of services that utilize the connector protocol + * to communicate with Elasticsearch. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture syncJobClaim(SyncJobClaimRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobClaimRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Claim a connector sync job. This action updates the job status to + * in_progress and sets the last_seen and + * started_at timestamps to the current time. Additionally, it can + * set the sync_cursor property for the sync job. + *

+ * This API is not intended for direct connector management by users. It + * supports the implementation of services that utilize the connector protocol + * to communicate with Elasticsearch. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobClaimRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture syncJobClaim( + Function> fn) { + return syncJobClaim(fn.apply(new SyncJobClaimRequest.Builder()).build()); + } + // ----- Endpoint: connector.sync_job_delete /** @@ -427,6 +527,49 @@ public final CompletableFuture syncJobDelete( return syncJobDelete(fn.apply(new SyncJobDeleteRequest.Builder()).build()); } + // ----- Endpoint: connector.sync_job_error + + /** + * Set a connector sync job error. Set the error field for a + * connector sync job and set its status to error. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture syncJobError(SyncJobErrorRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobErrorRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Set a connector sync job error. Set the error field for a + * connector sync job and set its status to error. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobErrorRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture syncJobError( + Function> fn) { + return syncJobError(fn.apply(new SyncJobErrorRequest.Builder()).build()); + } + // ----- Endpoint: connector.sync_job_get /** @@ -554,6 +697,55 @@ public final CompletableFuture syncJobPost( return syncJobPost(fn.apply(new SyncJobPostRequest.Builder()).build()); } + // ----- Endpoint: connector.sync_job_update_stats + + /** + * Set the connector sync job stats. Stats include: + * deleted_document_count, indexed_document_count, + * indexed_document_volume, and total_document_count. + * You can also update last_seen. This API is mainly used by the + * connector service for updating sync job information. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture syncJobUpdateStats(SyncJobUpdateStatsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobUpdateStatsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Set the connector sync job stats. Stats include: + * deleted_document_count, indexed_document_count, + * indexed_document_volume, and total_document_count. + * You can also update last_seen. This API is mainly used by the + * connector service for updating sync job information. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobUpdateStatsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture syncJobUpdateStats( + Function> fn) { + return syncJobUpdateStats(fn.apply(new SyncJobUpdateStatsRequest.Builder()).build()); + } + // ----- Endpoint: connector.update_active_filtering /** @@ -715,6 +907,69 @@ public final CompletableFuture updateError( return updateError(fn.apply(new UpdateErrorRequest.Builder()).build()); } + // ----- Endpoint: connector.update_features + + /** + * Update the connector features. Update the connector features in the connector + * document. This API can be used to control the following aspects of a + * connector: + *

+ * <ul>
+ * <li>document-level security</li>
+ * <li>incremental syncs</li>
+ * <li>advanced sync rules</li>
+ * <li>basic sync rules</li>
+ * </ul>

+ * Normally, the running connector service automatically manages these features. + * However, you can use this API to override the default behavior. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture updateFeatures(UpdateFeaturesRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) UpdateFeaturesRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Update the connector features. Update the connector features in the connector + * document. This API can be used to control the following aspects of a + * connector: + *

+ * <ul>
+ * <li>document-level security</li>
+ * <li>incremental syncs</li>
+ * <li>advanced sync rules</li>
+ * <li>basic sync rules</li>
+ * </ul>

+ * Normally, the running connector service automatically manages these features. + * However, you can use this API to override the default behavior. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link UpdateFeaturesRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture updateFeatures( + Function> fn) { + return updateFeatures(fn.apply(new UpdateFeaturesRequest.Builder()).build()); + } + // ----- Endpoint: connector.update_filtering /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java index 252ff7abd..41b320033 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/ElasticsearchConnectorClient.java @@ -392,6 +392,109 @@ public final SyncJobCancelResponse syncJobCancel( return syncJobCancel(fn.apply(new SyncJobCancelRequest.Builder()).build()); } + // ----- Endpoint: connector.sync_job_check_in + + /** + * Check in a connector sync job. Check in a connector sync job and set the + * last_seen field to the current time before updating it in the + * internal index. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public SyncJobCheckInResponse syncJobCheckIn(SyncJobCheckInRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobCheckInRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Check in a connector sync job. Check in a connector sync job and set the + * last_seen field to the current time before updating it in the + * internal index. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobCheckInRequest} + * @see Documentation + * on elastic.co + */ + + public final SyncJobCheckInResponse syncJobCheckIn( + Function> fn) + throws IOException, ElasticsearchException { + return syncJobCheckIn(fn.apply(new SyncJobCheckInRequest.Builder()).build()); + } + + // ----- Endpoint: connector.sync_job_claim + + /** + * Claim a connector sync job. This action updates the job status to + * in_progress and sets the last_seen and + * started_at timestamps to the current time. Additionally, it can + * set the sync_cursor property for the sync job. + *

+ * This API is not intended for direct connector management by users. It + * supports the implementation of services that utilize the connector protocol + * to communicate with Elasticsearch. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public SyncJobClaimResponse syncJobClaim(SyncJobClaimRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobClaimRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Claim a connector sync job. This action updates the job status to + * in_progress and sets the last_seen and + * started_at timestamps to the current time. Additionally, it can + * set the sync_cursor property for the sync job. + *

+ * This API is not intended for direct connector management by users. It + * supports the implementation of services that utilize the connector protocol + * to communicate with Elasticsearch. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobClaimRequest} + * @see Documentation + * on elastic.co + */ + + public final SyncJobClaimResponse syncJobClaim( + Function> fn) + throws IOException, ElasticsearchException { + return syncJobClaim(fn.apply(new SyncJobClaimRequest.Builder()).build()); + } + // ----- Endpoint: connector.sync_job_delete /** @@ -433,6 +536,50 @@ public final SyncJobDeleteResponse syncJobDelete( return syncJobDelete(fn.apply(new SyncJobDeleteRequest.Builder()).build()); } + // ----- Endpoint: connector.sync_job_error + + /** + * Set a connector sync job error. Set the error field for a + * connector sync job and set its status to error. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public SyncJobErrorResponse syncJobError(SyncJobErrorRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobErrorRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Set a connector sync job error. Set the error field for a + * connector sync job and set its status to error. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobErrorRequest} + * @see Documentation + * on elastic.co + */ + + public final SyncJobErrorResponse syncJobError( + Function> fn) + throws IOException, ElasticsearchException { + return syncJobError(fn.apply(new SyncJobErrorRequest.Builder()).build()); + } + // ----- Endpoint: connector.sync_job_get /** @@ -562,6 +709,57 @@ public final SyncJobPostResponse syncJobPost( return syncJobPost(fn.apply(new SyncJobPostRequest.Builder()).build()); } + // ----- Endpoint: connector.sync_job_update_stats + + /** + * Set the connector sync job stats. Stats include: + * deleted_document_count, indexed_document_count, + * indexed_document_volume, and total_document_count. + * You can also update last_seen. This API is mainly used by the + * connector service for updating sync job information. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public SyncJobUpdateStatsResponse syncJobUpdateStats(SyncJobUpdateStatsRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) SyncJobUpdateStatsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Set the connector sync job stats. Stats include: + * deleted_document_count, indexed_document_count, + * indexed_document_volume, and total_document_count. + * You can also update last_seen. This API is mainly used by the + * connector service for updating sync job information. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link SyncJobUpdateStatsRequest} + * @see Documentation + * on elastic.co + */ + + public final SyncJobUpdateStatsResponse syncJobUpdateStats( + Function> fn) + throws IOException, ElasticsearchException { + return syncJobUpdateStats(fn.apply(new SyncJobUpdateStatsRequest.Builder()).build()); + } + // ----- Endpoint: connector.update_active_filtering /** @@ -729,6 +927,71 @@ public final UpdateErrorResponse updateError( return updateError(fn.apply(new UpdateErrorRequest.Builder()).build()); } + // ----- Endpoint: connector.update_features + + /** + * Update the connector features. Update the connector features in the connector + * document. This API can be used to control the following aspects of a + * connector: + *

+ * <ul>
+ * <li>document-level security</li>
+ * <li>incremental syncs</li>
+ * <li>advanced sync rules</li>
+ * <li>basic sync rules</li>
+ * </ul>

+ * Normally, the running connector service automatically manages these features. + * However, you can use this API to override the default behavior. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see Documentation + * on elastic.co + */ + + public UpdateFeaturesResponse updateFeatures(UpdateFeaturesRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) UpdateFeaturesRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Update the connector features. Update the connector features in the connector + * document. This API can be used to control the following aspects of a + * connector: + *

+ * <ul>
+ * <li>document-level security</li>
+ * <li>incremental syncs</li>
+ * <li>advanced sync rules</li>
+ * <li>basic sync rules</li>
+ * </ul>

+ * Normally, the running connector service automatically manages these features. + * However, you can use this API to override the default behavior. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @param fn + * a function that initializes a builder to create the + * {@link UpdateFeaturesRequest} + * @see Documentation + * on elastic.co + */ + + public final UpdateFeaturesResponse updateFeatures( + Function> fn) + throws IOException, ElasticsearchException { + return updateFeatures(fn.apply(new UpdateFeaturesRequest.Builder()).build()); + } + // ----- Endpoint: connector.update_filtering /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobCheckInRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobCheckInRequest.java new file mode 100644 index 000000000..6bb11248f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobCheckInRequest.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_check_in.Request + +/** + * Check in a connector sync job. Check in a connector sync job and set the + * last_seen field to the current time before updating it in the + * internal index. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see API + * specification + */ + +public class SyncJobCheckInRequest extends RequestBase { + private final String connectorSyncJobId; + + // --------------------------------------------------------------------------------------------- + + private SyncJobCheckInRequest(Builder builder) { + + this.connectorSyncJobId = ApiTypeHelper.requireNonNull(builder.connectorSyncJobId, this, "connectorSyncJobId"); + + } + + public static SyncJobCheckInRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The unique identifier of the connector sync job to be checked in. + *

+ * API name: {@code connector_sync_job_id} + */ + public final String connectorSyncJobId() { + return this.connectorSyncJobId; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link SyncJobCheckInRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String connectorSyncJobId; + + /** + * Required - The unique identifier of the connector sync job to be checked in. + *

+ * API name: {@code connector_sync_job_id} + */ + public final Builder connectorSyncJobId(String value) { + this.connectorSyncJobId = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link SyncJobCheckInRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public SyncJobCheckInRequest build() { + _checkSingleUse(); + + return new SyncJobCheckInRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code connector.sync_job_check_in}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/connector.sync_job_check_in", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_connector"); + buf.append("/_sync_job"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.connectorSyncJobId, buf); + buf.append("/_check_in"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + params.put("connectorSyncJobId", request.connectorSyncJobId); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), false, SyncJobCheckInResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobCheckInResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobCheckInResponse.java new file mode 100644 index 
000000000..182bf1af8 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobCheckInResponse.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_check_in.Response + +/** + * + * @see API + * specification + */ + +public class SyncJobCheckInResponse { + public SyncJobCheckInResponse() { + } + + /** + * Singleton instance for {@link SyncJobCheckInResponse}. + */ + public static final SyncJobCheckInResponse _INSTANCE = new SyncJobCheckInResponse(); + + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer + .emptyObject(SyncJobCheckInResponse._INSTANCE); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobClaimRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobClaimRequest.java new file mode 100644 index 000000000..4a11908d7 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobClaimRequest.java @@ -0,0 +1,289 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_claim.Request + +/** + * Claim a connector sync job. This action updates the job status to + * in_progress and sets the last_seen and + * started_at timestamps to the current time. Additionally, it can + * set the sync_cursor property for the sync job. + *

+ * This API is not intended for direct connector management by users. It + * supports the implementation of services that utilize the connector protocol + * to communicate with Elasticsearch. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see API + * specification + */ +@JsonpDeserializable +public class SyncJobClaimRequest extends RequestBase implements JsonpSerializable { + private final String connectorSyncJobId; + + @Nullable + private final JsonData syncCursor; + + private final String workerHostname; + + // --------------------------------------------------------------------------------------------- + + private SyncJobClaimRequest(Builder builder) { + + this.connectorSyncJobId = ApiTypeHelper.requireNonNull(builder.connectorSyncJobId, this, "connectorSyncJobId"); + this.syncCursor = builder.syncCursor; + this.workerHostname = ApiTypeHelper.requireNonNull(builder.workerHostname, this, "workerHostname"); + + } + + public static SyncJobClaimRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The unique identifier of the connector sync job. + *

+ * API name: {@code connector_sync_job_id} + */ + public final String connectorSyncJobId() { + return this.connectorSyncJobId; + } + + /** + * The cursor object from the last incremental sync job. This should reference + * the sync_cursor field in the connector state for which the job + * runs. + *

+ * API name: {@code sync_cursor} + */ + @Nullable + public final JsonData syncCursor() { + return this.syncCursor; + } + + /** + * Required - The host name of the current system that will run the job. + *

+ * API name: {@code worker_hostname} + */ + public final String workerHostname() { + return this.workerHostname; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.syncCursor != null) { + generator.writeKey("sync_cursor"); + this.syncCursor.serialize(generator, mapper); + + } + generator.writeKey("worker_hostname"); + generator.write(this.workerHostname); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link SyncJobClaimRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String connectorSyncJobId; + + @Nullable + private JsonData syncCursor; + + private String workerHostname; + + /** + * Required - The unique identifier of the connector sync job. + *

+ * API name: {@code connector_sync_job_id} + */ + public final Builder connectorSyncJobId(String value) { + this.connectorSyncJobId = value; + return this; + } + + /** + * The cursor object from the last incremental sync job. This should reference + * the sync_cursor field in the connector state for which the job + * runs. + *

+ * API name: {@code sync_cursor} + */ + public final Builder syncCursor(@Nullable JsonData value) { + this.syncCursor = value; + return this; + } + + /** + * Required - The host name of the current system that will run the job. + *

+ * API name: {@code worker_hostname} + */ + public final Builder workerHostname(String value) { + this.workerHostname = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link SyncJobClaimRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public SyncJobClaimRequest build() { + _checkSingleUse(); + + return new SyncJobClaimRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link SyncJobClaimRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, SyncJobClaimRequest::setupSyncJobClaimRequestDeserializer); + + protected static void setupSyncJobClaimRequestDeserializer(ObjectDeserializer op) { + + op.add(Builder::syncCursor, JsonData._DESERIALIZER, "sync_cursor"); + op.add(Builder::workerHostname, JsonpDeserializer.stringDeserializer(), "worker_hostname"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code connector.sync_job_claim}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/connector.sync_job_claim", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_connector"); + buf.append("/_sync_job"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.connectorSyncJobId, buf); + buf.append("/_claim"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + params.put("connectorSyncJobId", request.connectorSyncJobId); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, SyncJobClaimResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobClaimResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobClaimResponse.java new file mode 100644 index 000000000..6ce4068ee --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobClaimResponse.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_claim.Response + +/** + * + * @see API + * specification + */ + +public class SyncJobClaimResponse { + public SyncJobClaimResponse() { + } + + /** + * Singleton instance for {@link SyncJobClaimResponse}. 
+ */ + public static final SyncJobClaimResponse _INSTANCE = new SyncJobClaimResponse(); + + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer + .emptyObject(SyncJobClaimResponse._INSTANCE); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobErrorRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobErrorRequest.java new file mode 100644 index 000000000..a35f87a44 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobErrorRequest.java @@ -0,0 +1,245 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_error.Request + +/** + * Set a connector sync job error. Set the error field for a + * connector sync job and set its status to error. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see API + * specification + */ +@JsonpDeserializable +public class SyncJobErrorRequest extends RequestBase implements JsonpSerializable { + private final String connectorSyncJobId; + + private final String error; + + // --------------------------------------------------------------------------------------------- + + private SyncJobErrorRequest(Builder builder) { + + this.connectorSyncJobId = ApiTypeHelper.requireNonNull(builder.connectorSyncJobId, this, "connectorSyncJobId"); + this.error = ApiTypeHelper.requireNonNull(builder.error, this, "error"); + + } + + public static SyncJobErrorRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The unique identifier for the connector sync job. + *

+ * API name: {@code connector_sync_job_id} + */ + public final String connectorSyncJobId() { + return this.connectorSyncJobId; + } + + /** + * Required - The error for the connector sync job error field. + *

+ * API name: {@code error} + */ + public final String error() { + return this.error; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("error"); + generator.write(this.error); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link SyncJobErrorRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String connectorSyncJobId; + + private String error; + + /** + * Required - The unique identifier for the connector sync job. + *

+ * API name: {@code connector_sync_job_id} + */ + public final Builder connectorSyncJobId(String value) { + this.connectorSyncJobId = value; + return this; + } + + /** + * Required - The error for the connector sync job error field. + *

+ * API name: {@code error} + */ + public final Builder error(String value) { + this.error = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link SyncJobErrorRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public SyncJobErrorRequest build() { + _checkSingleUse(); + + return new SyncJobErrorRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link SyncJobErrorRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, SyncJobErrorRequest::setupSyncJobErrorRequestDeserializer); + + protected static void setupSyncJobErrorRequestDeserializer(ObjectDeserializer op) { + + op.add(Builder::error, JsonpDeserializer.stringDeserializer(), "error"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code connector.sync_job_error}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/connector.sync_job_error", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_connector"); + buf.append("/_sync_job"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.connectorSyncJobId, buf); + buf.append("/_error"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + params.put("connectorSyncJobId", request.connectorSyncJobId); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, SyncJobErrorResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobErrorResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobErrorResponse.java new file mode 100644 index 000000000..dd59dcd78 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobErrorResponse.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_error.Response + +/** + * + * @see API + * specification + */ + +public class SyncJobErrorResponse { + public SyncJobErrorResponse() { + } + + /** + * Singleton instance for {@link SyncJobErrorResponse}. 
+ */ + public static final SyncJobErrorResponse _INSTANCE = new SyncJobErrorResponse(); + + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer + .emptyObject(SyncJobErrorResponse._INSTANCE); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobUpdateStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobUpdateStatsRequest.java new file mode 100644 index 000000000..bd3c1b832 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobUpdateStatsRequest.java @@ -0,0 +1,444 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.Long; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_update_stats.Request + +/** + * Set the connector sync job stats. Stats include: + * deleted_document_count, indexed_document_count, + * indexed_document_volume, and total_document_count. + * You can also update last_seen. 
This API is mainly used by the + * connector service for updating sync job information. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see API + * specification + */ +@JsonpDeserializable +public class SyncJobUpdateStatsRequest extends RequestBase implements JsonpSerializable { + private final String connectorSyncJobId; + + private final long deletedDocumentCount; + + private final long indexedDocumentCount; + + private final long indexedDocumentVolume; + + @Nullable + private final Time lastSeen; + + private final Map metadata; + + @Nullable + private final Integer totalDocumentCount; + + // --------------------------------------------------------------------------------------------- + + private SyncJobUpdateStatsRequest(Builder builder) { + + this.connectorSyncJobId = ApiTypeHelper.requireNonNull(builder.connectorSyncJobId, this, "connectorSyncJobId"); + this.deletedDocumentCount = ApiTypeHelper.requireNonNull(builder.deletedDocumentCount, this, + "deletedDocumentCount"); + this.indexedDocumentCount = ApiTypeHelper.requireNonNull(builder.indexedDocumentCount, this, + "indexedDocumentCount"); + this.indexedDocumentVolume = ApiTypeHelper.requireNonNull(builder.indexedDocumentVolume, this, + "indexedDocumentVolume"); + this.lastSeen = builder.lastSeen; + this.metadata = ApiTypeHelper.unmodifiable(builder.metadata); + this.totalDocumentCount = builder.totalDocumentCount; + + } + + public static SyncJobUpdateStatsRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The unique identifier of the connector sync job. + *

+ * API name: {@code connector_sync_job_id} + */ + public final String connectorSyncJobId() { + return this.connectorSyncJobId; + } + + /** + * Required - The number of documents the sync job deleted. + *

+ * API name: {@code deleted_document_count} + */ + public final long deletedDocumentCount() { + return this.deletedDocumentCount; + } + + /** + * Required - The number of documents the sync job indexed. + *

+ * API name: {@code indexed_document_count} + */ + public final long indexedDocumentCount() { + return this.indexedDocumentCount; + } + + /** + * Required - The total size of the data (in MiB) the sync job indexed. + *

+ * API name: {@code indexed_document_volume} + */ + public final long indexedDocumentVolume() { + return this.indexedDocumentVolume; + } + + /** + * The timestamp to use in the last_seen property for the connector + * sync job. + *

+ * API name: {@code last_seen} + */ + @Nullable + public final Time lastSeen() { + return this.lastSeen; + } + + /** + * The connector-specific metadata. + *

+ * API name: {@code metadata} + */ + public final Map metadata() { + return this.metadata; + } + + /** + * The total number of documents in the target index after the sync job + * finished. + *

+ * API name: {@code total_document_count} + */ + @Nullable + public final Integer totalDocumentCount() { + return this.totalDocumentCount; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("deleted_document_count"); + generator.write(this.deletedDocumentCount); + + generator.writeKey("indexed_document_count"); + generator.write(this.indexedDocumentCount); + + generator.writeKey("indexed_document_volume"); + generator.write(this.indexedDocumentVolume); + + if (this.lastSeen != null) { + generator.writeKey("last_seen"); + this.lastSeen.serialize(generator, mapper); + + } + if (ApiTypeHelper.isDefined(this.metadata)) { + generator.writeKey("metadata"); + generator.writeStartObject(); + for (Map.Entry item0 : this.metadata.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (this.totalDocumentCount != null) { + generator.writeKey("total_document_count"); + generator.write(this.totalDocumentCount); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link SyncJobUpdateStatsRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String connectorSyncJobId; + + private Long deletedDocumentCount; + + private Long indexedDocumentCount; + + private Long indexedDocumentVolume; + + @Nullable + private Time lastSeen; + + @Nullable + private Map metadata; + + @Nullable + private Integer totalDocumentCount; + + /** + * Required - The unique identifier of the connector sync job. + *

+ * API name: {@code connector_sync_job_id} + */ + public final Builder connectorSyncJobId(String value) { + this.connectorSyncJobId = value; + return this; + } + + /** + * Required - The number of documents the sync job deleted. + *

+ * API name: {@code deleted_document_count} + */ + public final Builder deletedDocumentCount(long value) { + this.deletedDocumentCount = value; + return this; + } + + /** + * Required - The number of documents the sync job indexed. + *

+ * API name: {@code indexed_document_count} + */ + public final Builder indexedDocumentCount(long value) { + this.indexedDocumentCount = value; + return this; + } + + /** + * Required - The total size of the data (in MiB) the sync job indexed. + *

+ * API name: {@code indexed_document_volume} + */ + public final Builder indexedDocumentVolume(long value) { + this.indexedDocumentVolume = value; + return this; + } + + /** + * The timestamp to use in the last_seen property for the connector + * sync job. + *

+ * API name: {@code last_seen} + */ + public final Builder lastSeen(@Nullable Time value) { + this.lastSeen = value; + return this; + } + + /** + * The timestamp to use in the last_seen property for the connector + * sync job. + *

+ * API name: {@code last_seen} + */ + public final Builder lastSeen(Function> fn) { + return this.lastSeen(fn.apply(new Time.Builder()).build()); + } + + /** + * The connector-specific metadata. + *

+ * API name: {@code metadata} + *

+ * Adds all entries of map to metadata. + */ + public final Builder metadata(Map map) { + this.metadata = _mapPutAll(this.metadata, map); + return this; + } + + /** + * The connector-specific metadata. + *

+ * API name: {@code metadata} + *

+ * Adds an entry to metadata. + */ + public final Builder metadata(String key, JsonData value) { + this.metadata = _mapPut(this.metadata, key, value); + return this; + } + + /** + * The total number of documents in the target index after the sync job + * finished. + *

+ * API name: {@code total_document_count} + */ + public final Builder totalDocumentCount(@Nullable Integer value) { + this.totalDocumentCount = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link SyncJobUpdateStatsRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public SyncJobUpdateStatsRequest build() { + _checkSingleUse(); + + return new SyncJobUpdateStatsRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link SyncJobUpdateStatsRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, SyncJobUpdateStatsRequest::setupSyncJobUpdateStatsRequestDeserializer); + + protected static void setupSyncJobUpdateStatsRequestDeserializer( + ObjectDeserializer op) { + + op.add(Builder::deletedDocumentCount, JsonpDeserializer.longDeserializer(), "deleted_document_count"); + op.add(Builder::indexedDocumentCount, JsonpDeserializer.longDeserializer(), "indexed_document_count"); + op.add(Builder::indexedDocumentVolume, JsonpDeserializer.longDeserializer(), "indexed_document_volume"); + op.add(Builder::lastSeen, Time._DESERIALIZER, "last_seen"); + op.add(Builder::metadata, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "metadata"); + op.add(Builder::totalDocumentCount, JsonpDeserializer.integerDeserializer(), "total_document_count"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code connector.sync_job_update_stats}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/connector.sync_job_update_stats", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_connector"); + buf.append("/_sync_job"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.connectorSyncJobId, buf); + buf.append("/_stats"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _connectorSyncJobId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorSyncJobId; + + if (propsSet == (_connectorSyncJobId)) { + params.put("connectorSyncJobId", request.connectorSyncJobId); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, SyncJobUpdateStatsResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobUpdateStatsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobUpdateStatsResponse.java new file mode 100644 index 000000000..8ed01c136 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncJobUpdateStatsResponse.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.sync_job_update_stats.Response + +/** + * + * @see API + * specification + */ + +public class SyncJobUpdateStatsResponse { + public SyncJobUpdateStatsResponse() { + } + + /** + * Singleton instance for {@link SyncJobUpdateStatsResponse}. 
+ */ + public static final SyncJobUpdateStatsResponse _INSTANCE = new SyncJobUpdateStatsResponse(); + + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer + .emptyObject(SyncJobUpdateStatsResponse._INSTANCE); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncRulesFeature.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncRulesFeature.java index 68fa4f46a..5eb673119 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncRulesFeature.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/SyncRulesFeature.java @@ -78,6 +78,8 @@ public static SyncRulesFeature of(Function * API name: {@code advanced} */ @Nullable @@ -86,6 +88,8 @@ public final FeatureEnabled advanced() { } /** + * Indicates whether basic sync rules are enabled. + *

* API name: {@code basic} */ @Nullable @@ -136,6 +140,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private FeatureEnabled basic; /** + * Indicates whether advanced sync rules are enabled. + *

* API name: {@code advanced} */ public final Builder advanced(@Nullable FeatureEnabled value) { @@ -144,6 +150,8 @@ public final Builder advanced(@Nullable FeatureEnabled value) { } /** + * Indicates whether advanced sync rules are enabled. + *

* API name: {@code advanced} */ public final Builder advanced(Function> fn) { @@ -151,6 +159,8 @@ public final Builder advanced(Function * API name: {@code basic} */ public final Builder basic(@Nullable FeatureEnabled value) { @@ -159,6 +169,8 @@ public final Builder basic(@Nullable FeatureEnabled value) { } /** + * Indicates whether basic sync rules are enabled. + *

* API name: {@code basic} */ public final Builder basic(Function> fn) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/UpdateFeaturesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/UpdateFeaturesRequest.java new file mode 100644 index 000000000..f8cb4e806 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/UpdateFeaturesRequest.java @@ -0,0 +1,257 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: connector.update_features.Request + +/** + * Update the connector features. Update the connector features in the connector + * document. This API can be used to control the following aspects of a + * connector: + *

    + *
+ * <ul>
+ * <li>document-level security</li>
+ * <li>incremental syncs</li>
+ * <li>advanced sync rules</li>
+ * <li>basic sync rules</li>
+ * </ul>
+ *

+ * Normally, the running connector service automatically manages these features. + * However, you can use this API to override the default behavior. + *

+ * To sync data using self-managed connectors, you need to deploy the Elastic + * connector service on your own infrastructure. This service runs automatically + * on Elastic Cloud for Elastic managed connectors. + * + * @see API + * specification + */ +@JsonpDeserializable +public class UpdateFeaturesRequest extends RequestBase implements JsonpSerializable { + private final String connectorId; + + private final ConnectorFeatures features; + + // --------------------------------------------------------------------------------------------- + + private UpdateFeaturesRequest(Builder builder) { + + this.connectorId = ApiTypeHelper.requireNonNull(builder.connectorId, this, "connectorId"); + this.features = ApiTypeHelper.requireNonNull(builder.features, this, "features"); + + } + + public static UpdateFeaturesRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The unique identifier of the connector to be updated. + *

+ * API name: {@code connector_id} + */ + public final String connectorId() { + return this.connectorId; + } + + /** + * Required - API name: {@code features} + */ + public final ConnectorFeatures features() { + return this.features; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("features"); + this.features.serialize(generator, mapper); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link UpdateFeaturesRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String connectorId; + + private ConnectorFeatures features; + + /** + * Required - The unique identifier of the connector to be updated. + *

+ * API name: {@code connector_id} + */ + public final Builder connectorId(String value) { + this.connectorId = value; + return this; + } + + /** + * Required - API name: {@code features} + */ + public final Builder features(ConnectorFeatures value) { + this.features = value; + return this; + } + + /** + * Required - API name: {@code features} + */ + public final Builder features(Function> fn) { + return this.features(fn.apply(new ConnectorFeatures.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link UpdateFeaturesRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public UpdateFeaturesRequest build() { + _checkSingleUse(); + + return new UpdateFeaturesRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link UpdateFeaturesRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, UpdateFeaturesRequest::setupUpdateFeaturesRequestDeserializer); + + protected static void setupUpdateFeaturesRequestDeserializer(ObjectDeserializer op) { + + op.add(Builder::features, ConnectorFeatures._DESERIALIZER, "features"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code connector.update_features}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/connector.update_features", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _connectorId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorId; + + if (propsSet == (_connectorId)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_connector"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.connectorId, buf); + buf.append("/_features"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _connectorId = 1 << 0; + + int propsSet = 0; + + propsSet |= _connectorId; + + if (propsSet == (_connectorId)) { + params.put("connectorId", request.connectorId); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, UpdateFeaturesResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/UpdateFeaturesResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/UpdateFeaturesResponse.java new file mode 100644 index 000000000..af8780964 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/connector/UpdateFeaturesResponse.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.connector; + +import co.elastic.clients.elasticsearch._types.Result; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: connector.update_features.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class UpdateFeaturesResponse implements JsonpSerializable { + private final Result result; + + // --------------------------------------------------------------------------------------------- + + private UpdateFeaturesResponse(Builder builder) { + + this.result = ApiTypeHelper.requireNonNull(builder.result, this, "result"); + + } + + public static UpdateFeaturesResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code result} + */ + public final Result result() { + return this.result; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("result"); + this.result.serialize(generator, mapper); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link UpdateFeaturesResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private Result result; + + /** + * Required - API name: {@code result} + */ + public final Builder result(Result value) { + this.result = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link UpdateFeaturesResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public UpdateFeaturesResponse build() { + _checkSingleUse(); + + return new UpdateFeaturesResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link UpdateFeaturesResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, UpdateFeaturesResponse::setupUpdateFeaturesResponseDeserializer); + + protected static void setupUpdateFeaturesResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::result, Result._DESERIALIZER, "result"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoRequest.java index d21ce18fe..598e07652 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoRequest.java @@ -50,7 +50,7 @@ // typedef: _global.info.Request /** - * Get cluster info. Returns basic information about the cluster. + * Get cluster info. Get basic build, version, and cluster information. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoResponse.java index f78f1304f..e5fbe75dd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/InfoResponse.java @@ -87,7 +87,9 @@ public static InfoResponse of(Function> fn) } /** - * Required - API name: {@code cluster_name} + * Required - The responding cluster's name. + *

+ * API name: {@code cluster_name} */ public final String clusterName() { return this.clusterName; @@ -101,7 +103,9 @@ public final String clusterUuid() { } /** - * Required - API name: {@code name} + * Required - The responding node's name. + *

+ * API name: {@code name} */ public final String name() { return this.name; @@ -115,7 +119,9 @@ public final String tagline() { } /** - * Required - API name: {@code version} + * Required - The running version of Elasticsearch. + *

+ * API name: {@code version} */ public final ElasticsearchVersionInfo version() { return this.version; @@ -172,7 +178,9 @@ public static class Builder extends WithJsonObjectBuilderBase implement private ElasticsearchVersionInfo version; /** - * Required - API name: {@code cluster_name} + * Required - The responding cluster's name. + *

+ * API name: {@code cluster_name} */ public final Builder clusterName(String value) { this.clusterName = value; @@ -188,7 +196,9 @@ public final Builder clusterUuid(String value) { } /** - * Required - API name: {@code name} + * Required - The responding node's name. + *

+ * API name: {@code name} */ public final Builder name(String value) { this.name = value; @@ -204,7 +214,9 @@ public final Builder tagline(String value) { } /** - * Required - API name: {@code version} + * Required - The running version of Elasticsearch. + *

+ * API name: {@code version} */ public final Builder version(ElasticsearchVersionInfo value) { this.version = value; @@ -212,7 +224,9 @@ public final Builder version(ElasticsearchVersionInfo value) { } /** - * Required - API name: {@code version} + * Required - The running version of Elasticsearch. + *

+ * API name: {@code version} */ public final Builder version( Function> fn) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/DeleteDanglingIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/DeleteDanglingIndexRequest.java index f0995034d..02b8adc07 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/DeleteDanglingIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/DeleteDanglingIndexRequest.java @@ -57,11 +57,9 @@ // typedef: dangling_indices.delete_dangling_index.Request /** - * Delete a dangling index. - *

- * If Elasticsearch encounters index data that is absent from the current - * cluster state, those indices are considered to be dangling. For example, this - * can happen if you delete more than + * Delete a dangling index. If Elasticsearch encounters index data that is + * absent from the current cluster state, those indices are considered to be + * dangling. For example, this can happen if you delete more than * cluster.indices.tombstones.size indices while an Elasticsearch * node is offline. * diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java index 9399da94c..0b89a9acd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesAsyncClient.java @@ -70,16 +70,14 @@ public ElasticsearchDanglingIndicesAsyncClient withTransportOptions(@Nullable Tr // ----- Endpoint: dangling_indices.delete_dangling_index /** - * Delete a dangling index. - *

- * If Elasticsearch encounters index data that is absent from the current - * cluster state, those indices are considered to be dangling. For example, this - * can happen if you delete more than + * Delete a dangling index. If Elasticsearch encounters index data that is + * absent from the current cluster state, those indices are considered to be + * dangling. For example, this can happen if you delete more than * cluster.indices.tombstones.size indices while an Elasticsearch * node is offline. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html">Documentation * on elastic.co */ @@ -91,11 +89,9 @@ public CompletableFuture deleteDanglingIndex(Delete } /** - * Delete a dangling index. - *

- * If Elasticsearch encounters index data that is absent from the current - * cluster state, those indices are considered to be dangling. For example, this - * can happen if you delete more than + * Delete a dangling index. If Elasticsearch encounters index data that is + * absent from the current cluster state, those indices are considered to be + * dangling. For example, this can happen if you delete more than * cluster.indices.tombstones.size indices while an Elasticsearch * node is offline. * @@ -103,7 +99,7 @@ public CompletableFuture deleteDanglingIndex(Delete * a function that initializes a builder to create the * {@link DeleteDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html">Documentation * on elastic.co */ @@ -124,7 +120,7 @@ public final CompletableFuture deleteDanglingIndex( * node is offline. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html">Documentation * on elastic.co */ @@ -148,7 +144,7 @@ public CompletableFuture importDanglingIndex(Import * a function that initializes a builder to create the * {@link ImportDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html">Documentation * on elastic.co */ @@ -171,7 +167,7 @@ public final CompletableFuture importDanglingIndex( * Use this API to list dangling indices, which you can then import or delete. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-indices-list.html">Documentation * on elastic.co */ public CompletableFuture listDanglingIndices() { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java index dd146a73a..5d7f11bc2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/dangling_indices/ElasticsearchDanglingIndicesClient.java @@ -71,16 +71,14 @@ public ElasticsearchDanglingIndicesClient withTransportOptions(@Nullable Transpo // ----- Endpoint: dangling_indices.delete_dangling_index /** - * Delete a dangling index. - *

- * If Elasticsearch encounters index data that is absent from the current - * cluster state, those indices are considered to be dangling. For example, this - * can happen if you delete more than + * Delete a dangling index. If Elasticsearch encounters index data that is + * absent from the current cluster state, those indices are considered to be + * dangling. For example, this can happen if you delete more than * cluster.indices.tombstones.size indices while an Elasticsearch * node is offline. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html">Documentation * on elastic.co */ @@ -93,11 +91,9 @@ public DeleteDanglingIndexResponse deleteDanglingIndex(DeleteDanglingIndexReques } /** - * Delete a dangling index. - *

- * If Elasticsearch encounters index data that is absent from the current - * cluster state, those indices are considered to be dangling. For example, this - * can happen if you delete more than + * Delete a dangling index. If Elasticsearch encounters index data that is + * absent from the current cluster state, those indices are considered to be + * dangling. For example, this can happen if you delete more than * cluster.indices.tombstones.size indices while an Elasticsearch * node is offline. * @@ -105,7 +101,7 @@ public DeleteDanglingIndexResponse deleteDanglingIndex(DeleteDanglingIndexReques * a function that initializes a builder to create the * {@link DeleteDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html">Documentation * on elastic.co */ @@ -127,7 +123,7 @@ public final DeleteDanglingIndexResponse deleteDanglingIndex( * node is offline. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html">Documentation * on elastic.co */ @@ -152,7 +148,7 @@ public ImportDanglingIndexResponse importDanglingIndex(ImportDanglingIndexReques * a function that initializes a builder to create the * {@link ImportDanglingIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html">Documentation * on elastic.co */ @@ -176,7 +172,7 @@ public final ImportDanglingIndexResponse importDanglingIndex( * Use this API to list dangling indices, which you can then import or delete. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-indices-list.html">Documentation * on elastic.co */ public ListDanglingIndicesResponse listDanglingIndices() throws IOException, ElasticsearchException { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html index 96c4b13a0..e78118a4e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html @@ -89,8 +89,8 @@ '_global.health_report.StagnatingBackingIndices': '_global/health_report/types.ts#L157-L161', '_global.index.Request': '_global/index/IndexRequest.ts#L35-L119', '_global.index.Response': '_global/index/IndexResponse.ts#L22-L24', -'_global.info.Request': '_global/info/RootNodeInfoRequest.ts#L22-L29', -'_global.info.Response': '_global/info/RootNodeInfoResponse.ts#L23-L31', +'_global.info.Request': '_global/info/RootNodeInfoRequest.ts#L22-L31', +'_global.info.Response': '_global/info/RootNodeInfoResponse.ts#L23-L40', '_global.knn_search.Request': '_global/knn_search/KnnSearchRequest.ts#L26-L96', '_global.knn_search.Response': '_global/knn_search/KnnSearchResponse.ts#L26-L54', '_global.knn_search._types.Query': '_global/knn_search/_types/Knn.ts#L24-L33', @@ -273,11 +273,11 @@ '_types.DFRBasicModel': '_types/Similarity.ts#L32-L40', '_types.DistanceUnit': '_types/Geo.ts#L30-L40', '_types.DocStats': '_types/Stats.ts#L97-L109', -'_types.ElasticsearchVersionInfo': '_types/Base.ts#L54-L64', -'_types.ElasticsearchVersionMinInfo': '_types/Base.ts#L66-L74', +'_types.ElasticsearchVersionInfo': '_types/Base.ts#L54-L93', +'_types.ElasticsearchVersionMinInfo': '_types/Base.ts#L95-L103', '_types.EmptyObject': '_types/common.ts#L161-L162', '_types.ErrorCause': '_types/Errors.ts#L25-L50', -'_types.ErrorResponseBase': 
'_types/Base.ts#L76-L85', +'_types.ErrorResponseBase': '_types/Base.ts#L105-L114', '_types.ExpandWildcard': '_types/common.ts#L202-L216', '_types.FieldMemoryUsage': '_types/Stats.ts#L118-L121', '_types.FieldSizeUsage': '_types/Stats.ts#L92-L95', @@ -299,7 +299,7 @@ '_types.IBLambda': '_types/Similarity.ts#L47-L50', '_types.IndexingStats': '_types/Stats.ts#L143-L159', '_types.IndicesOptions': '_types/common.ts#L336-L363', -'_types.IndicesResponseBase': '_types/Base.ts#L87-L89', +'_types.IndicesResponseBase': '_types/Base.ts#L116-L118', '_types.InlineGet': '_types/common.ts#L321-L334', '_types.KnnQuery': '_types/Knn.ts#L54-L72', '_types.KnnRetriever': '_types/Retriever.ts#L64-L77', @@ -346,7 +346,7 @@ '_types.SegmentsStats': '_types/Stats.ts#L273-L366', '_types.ShardFailure': '_types/Errors.ts#L52-L58', '_types.ShardStatistics': '_types/Stats.ts#L54-L66', -'_types.ShardsOperationResponseBase': '_types/Base.ts#L91-L94', +'_types.ShardsOperationResponseBase': '_types/Base.ts#L120-L123', '_types.SlicedScroll': '_types/SlicedScroll.ts#L23-L27', '_types.Slices': '_types/common.ts#L365-L370', '_types.SlicesCalculation': '_types/common.ts#L372-L380', @@ -1147,7 +1147,7 @@ 'cluster.allocation_explain.Response': 'cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64', 'cluster.allocation_explain.UnassignedInformation': 'cluster/allocation_explain/types.ts#L128-L136', 'cluster.allocation_explain.UnassignedInformationReason': 'cluster/allocation_explain/types.ts#L138-L157', -'cluster.delete_component_template.Request': 'cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts#L24-L56', +'cluster.delete_component_template.Request': 'cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts#L24-L55', 'cluster.delete_component_template.Response': 'cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24', 'cluster.delete_voting_config_exclusions.Request': 
'cluster/delete_voting_config_exclusions/ClusterDeleteVotingConfigExclusionsRequest.ts#L22-L43', 'cluster.exists_component_template.Request': 'cluster/exists_component_template/ClusterComponentTemplateExistsRequest.ts#L24-L56', @@ -1166,7 +1166,7 @@ 'cluster.pending_tasks.Request': 'cluster/pending_tasks/ClusterPendingTasksRequest.ts#L23-L50', 'cluster.pending_tasks.Response': 'cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24', 'cluster.post_voting_config_exclusions.Request': 'cluster/post_voting_config_exclusions/ClusterPostVotingConfigExclusionsRequest.ts#L24-L69', -'cluster.put_component_template.Request': 'cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L95', +'cluster.put_component_template.Request': 'cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L99', 'cluster.put_component_template.Response': 'cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24', 'cluster.put_settings.Request': 'cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L61', 'cluster.put_settings.Response': 'cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29', @@ -1220,9 +1220,9 @@ 'cluster.stats.Response': 'cluster/stats/ClusterStatsResponse.ts#L53-L55', 'cluster.stats.RuntimeFieldTypes': 'cluster/stats/types.ts#L169-L226', 'cluster.stats.StatsResponseBase': 'cluster/stats/ClusterStatsResponse.ts#L25-L51', -'connector._types.Connector': 'connector/_types/Connector.ts#L237-L268', +'connector._types.Connector': 'connector/_types/Connector.ts#L252-L283', 'connector._types.ConnectorConfigProperties': 'connector/_types/Connector.ts#L83-L99', -'connector._types.ConnectorFeatures': 'connector/_types/Connector.ts#L224-L229', +'connector._types.ConnectorFeatures': 'connector/_types/Connector.ts#L230-L244', 'connector._types.ConnectorFieldType': 'connector/_types/Connector.ts#L43-L48', 'connector._types.ConnectorScheduling': 'connector/_types/Connector.ts#L106-L110', 'connector._types.ConnectorStatus': 
'connector/_types/Connector.ts#L130-L136', @@ -1247,12 +1247,12 @@ 'connector._types.LessThanValidation': 'connector/_types/Connector.ts#L58-L61', 'connector._types.ListTypeValidation': 'connector/_types/Connector.ts#L68-L71', 'connector._types.RegexValidation': 'connector/_types/Connector.ts#L78-L81', -'connector._types.SchedulingConfiguration': 'connector/_types/Connector.ts#L231-L235', +'connector._types.SchedulingConfiguration': 'connector/_types/Connector.ts#L246-L250', 'connector._types.SelectOption': 'connector/_types/Connector.ts#L25-L28', 'connector._types.SyncJobConnectorReference': 'connector/_types/SyncJob.ts#L31-L40', 'connector._types.SyncJobTriggerMethod': 'connector/_types/SyncJob.ts#L48-L51', 'connector._types.SyncJobType': 'connector/_types/SyncJob.ts#L42-L46', -'connector._types.SyncRulesFeature': 'connector/_types/Connector.ts#L219-L222', +'connector._types.SyncRulesFeature': 'connector/_types/Connector.ts#L219-L228', 'connector._types.SyncStatus': 'connector/_types/Connector.ts#L138-L146', 'connector._types.Validation': 'connector/_types/Connector.ts#L50-L56', 'connector.check_in.Request': 'connector/check_in/ConnectorCheckInRequest.ts#L22-L38', @@ -1269,14 +1269,22 @@ 'connector.put.Response': 'connector/put/ConnectorPutResponse.ts#L23-L28', 'connector.sync_job_cancel.Request': 'connector/sync_job_cancel/SyncJobCancelRequest.ts#L22-L39', 'connector.sync_job_cancel.Response': 'connector/sync_job_cancel/SyncJobCancelResponse.ts#L22-L26', +'connector.sync_job_check_in.Request': 'connector/sync_job_check_in/SyncJobCheckInRequest.ts#L22-L39', +'connector.sync_job_check_in.Response': 'connector/sync_job_check_in/SyncJobCheckInResponse.ts#L20-L22', +'connector.sync_job_claim.Request': 'connector/sync_job_claim/SyncJobClaimRequest.ts#L23-L55', +'connector.sync_job_claim.Response': 'connector/sync_job_claim/SyncJobClaimResponse.ts#L20-L22', 'connector.sync_job_delete.Request': 'connector/sync_job_delete/SyncJobDeleteRequest.ts#L22-L39', 
'connector.sync_job_delete.Response': 'connector/sync_job_delete/SyncJobDeleteResponse.ts#L22-L24', +'connector.sync_job_error.Request': 'connector/sync_job_error/SyncJobErrorRequest.ts#L23-L46', +'connector.sync_job_error.Response': 'connector/sync_job_error/SyncJobErrorResponse.ts#L20-L22', 'connector.sync_job_get.Request': 'connector/sync_job_get/SyncJobGetRequest.ts#L22-L36', 'connector.sync_job_get.Response': 'connector/sync_job_get/SyncJobGetResponse.ts#L22-L24', 'connector.sync_job_list.Request': 'connector/sync_job_list/SyncJobListRequest.ts#L25-L57', 'connector.sync_job_list.Response': 'connector/sync_job_list/SyncJobListResponse.ts#L23-L28', 'connector.sync_job_post.Request': 'connector/sync_job_post/SyncJobPostRequest.ts#L23-L45', 'connector.sync_job_post.Response': 'connector/sync_job_post/SyncJobPostResponse.ts#L22-L26', +'connector.sync_job_update_stats.Request': 'connector/sync_job_update_stats/SyncJobUpdateStatsRequest.ts#L24-L72', +'connector.sync_job_update_stats.Response': 'connector/sync_job_update_stats/SyncJobUpdateStatsResponse.ts#L20-L22', 'connector.update_active_filtering.Request': 'connector/update_active_filtering/ConnectorUpdateActiveFilteringRequest.ts#L22-L38', 'connector.update_active_filtering.Response': 'connector/update_active_filtering/ConnectorUpdateActiveFilteringResponse.ts#L22-L26', 'connector.update_api_key_id.Request': 'connector/update_api_key_id/ConnectorUpdateAPIKeyIDRequest.ts#L21-L47', @@ -1285,6 +1293,8 @@ 'connector.update_configuration.Response': 'connector/update_configuration/ConnectorUpdateConfigurationResponse.ts#L22-L26', 'connector.update_error.Request': 'connector/update_error/ConnectorUpdateErrorRequest.ts#L23-L48', 'connector.update_error.Response': 'connector/update_error/ConnectorUpdateErrorResponse.ts#L22-L26', +'connector.update_features.Request': 'connector/update_features/ConnectorUpdateFeaturesRequest.ts#L23-L55', +'connector.update_features.Response': 
'connector/update_features/ConnectorUpdateFeaturesResponse.ts#L22-L26', 'connector.update_filtering.Request': 'connector/update_filtering/ConnectorUpdateFilteringRequest.ts#L27-L54', 'connector.update_filtering.Response': 'connector/update_filtering/ConnectorUpdateFilteringResponse.ts#L22-L26', 'connector.update_filtering_validation.Request': 'connector/update_filtering_validation/ConnectorUpdateFilteringValidationRequest.ts#L23-L42', @@ -1303,12 +1313,12 @@ 'connector.update_service_type.Response': 'connector/update_service_type/ConnectorUpdateServiceTypeResponse.ts#L22-L26', 'connector.update_status.Request': 'connector/update_status/ConnectorUpdateStatusRequest.ts#L23-L43', 'connector.update_status.Response': 'connector/update_status/ConnectorUpdateStatusResponse.ts#L22-L26', -'dangling_indices.delete_dangling_index.Request': 'dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts#L24-L48', +'dangling_indices.delete_dangling_index.Request': 'dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts#L24-L49', 'dangling_indices.delete_dangling_index.Response': 'dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24', -'dangling_indices.import_dangling_index.Request': 'dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts#L24-L49', +'dangling_indices.import_dangling_index.Request': 'dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts#L24-L51', 'dangling_indices.import_dangling_index.Response': 'dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24', 'dangling_indices.list_dangling_indices.DanglingIndex': 'dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34', -'dangling_indices.list_dangling_indices.Request': 'dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts#L22-L33', +'dangling_indices.list_dangling_indices.Request': 'dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts#L22-L35', 
'dangling_indices.list_dangling_indices.Response': 'dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27', 'enrich._types.Policy': 'enrich/_types/Policy.ts#L34-L41', 'enrich._types.PolicyType': 'enrich/_types/Policy.ts#L28-L32', @@ -1369,7 +1379,7 @@ 'ilm._types.MigrateAction': 'ilm/_types/Phase.ts#L144-L146', 'ilm._types.Phase': 'ilm/_types/Phase.ts#L26-L32', 'ilm._types.Phases': 'ilm/_types/Phase.ts#L34-L40', -'ilm._types.Policy': 'ilm/_types/Policy.ts#L23-L26', +'ilm._types.Policy': 'ilm/_types/Policy.ts#L23-L29', 'ilm._types.RolloverAction': 'ilm/_types/Phase.ts#L102-L113', 'ilm._types.SearchableSnapshotAction': 'ilm/_types/Phase.ts#L131-L134', 'ilm._types.SetPriorityAction': 'ilm/_types/Phase.ts#L98-L100', @@ -1389,10 +1399,10 @@ 'ilm.get_status.Request': 'ilm/get_status/GetIlmStatusRequest.ts#L22-L29', 'ilm.get_status.Response': 'ilm/get_status/GetIlmStatusResponse.ts#L22-L24', 'ilm.migrate_to_data_tiers.Request': 'ilm/migrate_to_data_tiers/Request.ts#L22-L54', -'ilm.migrate_to_data_tiers.Response': 'ilm/migrate_to_data_tiers/Response.ts#L22-L32', -'ilm.move_to_step.Request': 'ilm/move_to_step/MoveToStepRequest.ts#L24-L51', +'ilm.migrate_to_data_tiers.Response': 'ilm/migrate_to_data_tiers/Response.ts#L22-L51', +'ilm.move_to_step.Request': 'ilm/move_to_step/MoveToStepRequest.ts#L24-L57', 'ilm.move_to_step.Response': 'ilm/move_to_step/MoveToStepResponse.ts#L22-L24', -'ilm.move_to_step.StepKey': 'ilm/move_to_step/types.ts#L20-L25', +'ilm.move_to_step.StepKey': 'ilm/move_to_step/types.ts#L20-L31', 'ilm.put_lifecycle.Request': 'ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L59', 'ilm.put_lifecycle.Response': 'ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24', 'ilm.remove_policy.Request': 'ilm/remove_policy/RemovePolicyRequest.ts#L23-L35', @@ -1493,83 +1503,86 @@ 'indices.analyze.AnalyzerDetail': 'indices/analyze/types.ts#L32-L35', 'indices.analyze.CharFilterDetail': 'indices/analyze/types.ts#L46-L49', 'indices.analyze.ExplainAnalyzeToken': 
'indices/analyze/types.ts#L52-L67', -'indices.analyze.Request': 'indices/analyze/IndicesAnalyzeRequest.ts#L27-L93', +'indices.analyze.Request': 'indices/analyze/IndicesAnalyzeRequest.ts#L27-L100', 'indices.analyze.Response': 'indices/analyze/IndicesAnalyzeResponse.ts#L22-L27', 'indices.analyze.TokenDetail': 'indices/analyze/types.ts#L71-L74', -'indices.clear_cache.Request': 'indices/clear_cache/IndicesIndicesClearCacheRequest.ts#L23-L78', +'indices.clear_cache.Request': 'indices/clear_cache/IndicesClearCacheRequest.ts#L23-L83', 'indices.clear_cache.Response': 'indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24', -'indices.clone.Request': 'indices/clone/IndicesCloneRequest.ts#L27-L98', +'indices.clone.Request': 'indices/clone/IndicesCloneRequest.ts#L27-L120', 'indices.clone.Response': 'indices/clone/IndicesCloneResponse.ts#L22-L28', 'indices.close.CloseIndexResult': 'indices/close/CloseIndexResponse.ts#L32-L35', 'indices.close.CloseShardResult': 'indices/close/CloseIndexResponse.ts#L37-L39', -'indices.close.Request': 'indices/close/CloseIndexRequest.ts#L24-L94', +'indices.close.Request': 'indices/close/CloseIndexRequest.ts#L24-L95', 'indices.close.Response': 'indices/close/CloseIndexResponse.ts#L24-L30', -'indices.create.Request': 'indices/create/IndicesCreateRequest.ts#L28-L82', +'indices.create.Request': 'indices/create/IndicesCreateRequest.ts#L28-L102', 'indices.create.Response': 'indices/create/IndicesCreateResponse.ts#L22-L28', 'indices.create_data_stream.Request': 'indices/create_data_stream/IndicesCreateDataStreamRequest.ts#L24-L58', 'indices.create_data_stream.Response': 'indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24', 'indices.data_streams_stats.DataStreamsStatsItem': 'indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65', 'indices.data_streams_stats.Request': 'indices/data_streams_stats/IndicesDataStreamsStatsRequest.ts#L23-L49', 'indices.data_streams_stats.Response': 
'indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43', -'indices.delete.Request': 'indices/delete/IndicesDeleteRequest.ts#L24-L74', +'indices.delete.Request': 'indices/delete/IndicesDeleteRequest.ts#L24-L81', 'indices.delete.Response': 'indices/delete/IndicesDeleteResponse.ts#L22-L24', -'indices.delete_alias.Request': 'indices/delete_alias/IndicesDeleteAliasRequest.ts#L24-L58', +'indices.delete_alias.Request': 'indices/delete_alias/IndicesDeleteAliasRequest.ts#L24-L60', 'indices.delete_alias.Response': 'indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24', 'indices.delete_data_lifecycle.Request': 'indices/delete_data_lifecycle/IndicesDeleteDataLifecycleRequest.ts#L24-L40', 'indices.delete_data_lifecycle.Response': 'indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L24', -'indices.delete_data_stream.Request': 'indices/delete_data_stream/IndicesDeleteDataStreamRequest.ts#L24-L51', +'indices.delete_data_stream.Request': 'indices/delete_data_stream/IndicesDeleteDataStreamRequest.ts#L24-L52', 'indices.delete_data_stream.Response': 'indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24', -'indices.delete_index_template.Request': 'indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts#L24-L53', +'indices.delete_index_template.Request': 'indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts#L24-L54', 'indices.delete_index_template.Response': 'indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24', -'indices.delete_template.Request': 'indices/delete_template/IndicesDeleteTemplateRequest.ts#L24-L52', +'indices.delete_template.Request': 'indices/delete_template/IndicesDeleteTemplateRequest.ts#L24-L53', 'indices.delete_template.Response': 'indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24', -'indices.disk_usage.Request': 'indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L74', +'indices.disk_usage.Request': 
'indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L78', 'indices.disk_usage.Response': 'indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25', -'indices.downsample.Request': 'indices/downsample/Request.ts#L24-L51', +'indices.downsample.Request': 'indices/downsample/Request.ts#L24-L52', 'indices.downsample.Response': 'indices/downsample/Response.ts#L22-L25', -'indices.exists.Request': 'indices/exists/IndicesExistsRequest.ts#L23-L73', +'indices.exists.Request': 'indices/exists/IndicesExistsRequest.ts#L23-L74', 'indices.exists_alias.Request': 'indices/exists_alias/IndicesExistsAliasRequest.ts#L23-L69', 'indices.exists_index_template.Request': 'indices/exists_index_template/IndicesExistsIndexTemplateRequest.ts#L24-L43', -'indices.exists_template.Request': 'indices/exists_template/IndicesExistsTemplateRequest.ts#L24-L39', +'indices.exists_template.Request': 'indices/exists_template/IndicesExistsTemplateRequest.ts#L24-L63', 'indices.explain_data_lifecycle.DataStreamLifecycleExplain': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41', -'indices.explain_data_lifecycle.Request': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleRequest.ts#L24-L39', +'indices.explain_data_lifecycle.Request': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleRequest.ts#L24-L40', 'indices.explain_data_lifecycle.Response': 'indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29', 'indices.field_usage_stats.FieldSummary': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L57-L66', 'indices.field_usage_stats.FieldsUsageBody': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L32-L39', 'indices.field_usage_stats.InvertedIndex': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L68-L76', -'indices.field_usage_stats.Request': 'indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L87', +'indices.field_usage_stats.Request': 
'indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L91', 'indices.field_usage_stats.Response': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30', 'indices.field_usage_stats.ShardsStats': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L52-L55', 'indices.field_usage_stats.UsageStatsIndex': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L41-L43', 'indices.field_usage_stats.UsageStatsShards': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L45-L50', -'indices.flush.Request': 'indices/flush/IndicesFlushRequest.ts#L23-L81', +'indices.flush.Request': 'indices/flush/IndicesFlushRequest.ts#L23-L82', 'indices.flush.Response': 'indices/flush/IndicesFlushResponse.ts#L22-L24', -'indices.forcemerge.Request': 'indices/forcemerge/IndicesForceMergeRequest.ts#L24-L56', +'indices.forcemerge.Request': 'indices/forcemerge/IndicesForceMergeRequest.ts#L24-L98', 'indices.forcemerge.Response': 'indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24', 'indices.forcemerge._types.ForceMergeResponseBody': 'indices/forcemerge/_types/response.ts#L22-L28', -'indices.get.Feature': 'indices/get/IndicesGetRequest.ts#L91-L95', -'indices.get.Request': 'indices/get/IndicesGetRequest.ts#L24-L89', +'indices.get.Feature': 'indices/get/IndicesGetRequest.ts#L92-L96', +'indices.get.Request': 'indices/get/IndicesGetRequest.ts#L24-L90', 'indices.get.Response': 'indices/get/IndicesGetResponse.ts#L24-L27', 'indices.get_alias.IndexAliases': 'indices/get_alias/IndicesGetAliasResponse.ts#L37-L39', -'indices.get_alias.Request': 'indices/get_alias/IndicesGetAliasRequest.ts#L23-L72', +'indices.get_alias.Request': 'indices/get_alias/IndicesGetAliasRequest.ts#L23-L74', 'indices.get_alias.Response': 'indices/get_alias/IndicesGetAliasResponse.ts#L26-L35', 'indices.get_data_lifecycle.DataStreamWithLifecycle': 'indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30', 'indices.get_data_lifecycle.Request': 
'indices/get_data_lifecycle/IndicesGetDataLifecycleRequest.ts#L24-L60', 'indices.get_data_lifecycle.Response': 'indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25', +'indices.get_data_lifecycle_stats.DataStreamStats': 'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L46-L59', +'indices.get_data_lifecycle_stats.Request': 'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsRequest.ts#L22-L31', +'indices.get_data_lifecycle_stats.Response': 'indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L24-L44', 'indices.get_data_stream.Request': 'indices/get_data_stream/IndicesGetDataStreamRequest.ts#L24-L66', 'indices.get_data_stream.Response': 'indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24', -'indices.get_field_mapping.Request': 'indices/get_field_mapping/IndicesGetFieldMappingRequest.ts#L23-L74', +'indices.get_field_mapping.Request': 'indices/get_field_mapping/IndicesGetFieldMappingRequest.ts#L23-L79', 'indices.get_field_mapping.Response': 'indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27', 'indices.get_field_mapping.TypeFieldMappings': 'indices/get_field_mapping/types.ts#L24-L26', 'indices.get_index_template.IndexTemplateItem': 'indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32', -'indices.get_index_template.Request': 'indices/get_index_template/IndicesGetIndexTemplateRequest.ts#L24-L61', +'indices.get_index_template.Request': 'indices/get_index_template/IndicesGetIndexTemplateRequest.ts#L24-L62', 'indices.get_index_template.Response': 'indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27', 'indices.get_mapping.IndexMappingRecord': 'indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32', -'indices.get_mapping.Request': 'indices/get_mapping/IndicesGetMappingRequest.ts#L24-L73', +'indices.get_mapping.Request': 'indices/get_mapping/IndicesGetMappingRequest.ts#L24-L74', 'indices.get_mapping.Response': 
'indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27', -'indices.get_settings.Request': 'indices/get_settings/IndicesGetSettingsRequest.ts#L24-L92', +'indices.get_settings.Request': 'indices/get_settings/IndicesGetSettingsRequest.ts#L24-L93', 'indices.get_settings.Response': 'indices/get_settings/IndicesGetSettingsResponse.ts#L24-L27', -'indices.get_template.Request': 'indices/get_template/IndicesGetTemplateRequest.ts#L24-L57', +'indices.get_template.Request': 'indices/get_template/IndicesGetTemplateRequest.ts#L24-L62', 'indices.get_template.Response': 'indices/get_template/IndicesGetTemplateResponse.ts#L23-L26', 'indices.migrate_to_data_stream.Request': 'indices/migrate_to_data_stream/IndicesMigrateToDataStreamRequest.ts#L24-L59', 'indices.migrate_to_data_stream.Response': 'indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24', @@ -1577,22 +1590,22 @@ 'indices.modify_data_stream.IndexAndDataStreamAction': 'indices/modify_data_stream/types.ts#L39-L44', 'indices.modify_data_stream.Request': 'indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L38', 'indices.modify_data_stream.Response': 'indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24', -'indices.open.Request': 'indices/open/IndicesOpenRequest.ts#L24-L82', +'indices.open.Request': 'indices/open/IndicesOpenRequest.ts#L24-L105', 'indices.open.Response': 'indices/open/IndicesOpenResponse.ts#L20-L25', -'indices.promote_data_stream.Request': 'indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L50', +'indices.promote_data_stream.Request': 'indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L51', 'indices.promote_data_stream.Response': 'indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25', 'indices.put_alias.Request': 'indices/put_alias/IndicesPutAliasRequest.ts#L25-L92', 'indices.put_alias.Response': 'indices/put_alias/IndicesPutAliasResponse.ts#L22-L24', -'indices.put_data_lifecycle.Request': 
'indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L76', +'indices.put_data_lifecycle.Request': 'indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L77', 'indices.put_data_lifecycle.Response': 'indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L24', -'indices.put_index_template.IndexTemplateMapping': 'indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L121-L143', -'indices.put_index_template.Request': 'indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L119', +'indices.put_index_template.IndexTemplateMapping': 'indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L152-L174', +'indices.put_index_template.Request': 'indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L150', 'indices.put_index_template.Response': 'indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24', -'indices.put_mapping.Request': 'indices/put_mapping/IndicesPutMappingRequest.ts#L41-L149', +'indices.put_mapping.Request': 'indices/put_mapping/IndicesPutMappingRequest.ts#L41-L177', 'indices.put_mapping.Response': 'indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24', -'indices.put_settings.Request': 'indices/put_settings/IndicesPutSettingsRequest.ts#L25-L93', +'indices.put_settings.Request': 'indices/put_settings/IndicesPutSettingsRequest.ts#L25-L108', 'indices.put_settings.Response': 'indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24', -'indices.put_template.Request': 'indices/put_template/IndicesPutTemplateRequest.ts#L29-L107', +'indices.put_template.Request': 'indices/put_template/IndicesPutTemplateRequest.ts#L29-L118', 'indices.put_template.Response': 'indices/put_template/IndicesPutTemplateResponse.ts#L22-L24', 'indices.recovery.FileDetails': 'indices/recovery/types.ts#L50-L54', 'indices.recovery.RecoveryBytes': 'indices/recovery/types.ts#L38-L48', @@ -1601,36 +1614,36 @@ 'indices.recovery.RecoveryOrigin': 'indices/recovery/types.ts#L76-L89', 
'indices.recovery.RecoveryStartStatus': 'indices/recovery/types.ts#L91-L96', 'indices.recovery.RecoveryStatus': 'indices/recovery/types.ts#L98-L100', -'indices.recovery.Request': 'indices/recovery/IndicesRecoveryRequest.ts#L23-L70', +'indices.recovery.Request': 'indices/recovery/IndicesRecoveryRequest.ts#L23-L74', 'indices.recovery.Response': 'indices/recovery/IndicesRecoveryResponse.ts#L24-L27', 'indices.recovery.ShardRecovery': 'indices/recovery/types.ts#L118-L135', 'indices.recovery.TranslogStatus': 'indices/recovery/types.ts#L102-L109', 'indices.recovery.VerifyIndex': 'indices/recovery/types.ts#L111-L116', -'indices.refresh.Request': 'indices/refresh/IndicesRefreshRequest.ts#L23-L61', +'indices.refresh.Request': 'indices/refresh/IndicesRefreshRequest.ts#L23-L74', 'indices.refresh.Response': 'indices/refresh/IndicesRefreshResponse.ts#L22-L24', 'indices.reload_search_analyzers.ReloadDetails': 'indices/reload_search_analyzers/types.ts#L27-L31', 'indices.reload_search_analyzers.ReloadResult': 'indices/reload_search_analyzers/types.ts#L22-L25', -'indices.reload_search_analyzers.Request': 'indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L51', +'indices.reload_search_analyzers.Request': 'indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L53', 'indices.reload_search_analyzers.Response': 'indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24', -'indices.resolve_cluster.Request': 'indices/resolve_cluster/ResolveClusterRequest.ts#L23-L76', +'indices.resolve_cluster.Request': 'indices/resolve_cluster/ResolveClusterRequest.ts#L23-L89', 'indices.resolve_cluster.ResolveClusterInfo': 'indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55', 'indices.resolve_cluster.Response': 'indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27', -'indices.resolve_index.Request': 'indices/resolve_index/ResolveIndexRequest.ts#L23-L61', +'indices.resolve_index.Request': 'indices/resolve_index/ResolveIndexRequest.ts#L23-L63', 
'indices.resolve_index.ResolveIndexAliasItem': 'indices/resolve_index/ResolveIndexResponse.ts#L37-L40', 'indices.resolve_index.ResolveIndexDataStreamsItem': 'indices/resolve_index/ResolveIndexResponse.ts#L42-L46', 'indices.resolve_index.ResolveIndexItem': 'indices/resolve_index/ResolveIndexResponse.ts#L30-L35', 'indices.resolve_index.Response': 'indices/resolve_index/ResolveIndexResponse.ts#L22-L28', -'indices.rollover.Request': 'indices/rollover/IndicesRolloverRequest.ts#L29-L100', +'indices.rollover.Request': 'indices/rollover/IndicesRolloverRequest.ts#L29-L137', 'indices.rollover.Response': 'indices/rollover/IndicesRolloverResponse.ts#L22-L32', 'indices.rollover.RolloverConditions': 'indices/rollover/types.ts#L24-L40', 'indices.segments.IndexSegment': 'indices/segments/types.ts#L24-L26', -'indices.segments.Request': 'indices/segments/IndicesSegmentsRequest.ts#L23-L66', +'indices.segments.Request': 'indices/segments/IndicesSegmentsRequest.ts#L23-L68', 'indices.segments.Response': 'indices/segments/IndicesSegmentsResponse.ts#L24-L29', 'indices.segments.Segment': 'indices/segments/types.ts#L28-L38', 'indices.segments.ShardSegmentRouting': 'indices/segments/types.ts#L40-L44', 'indices.segments.ShardsSegment': 'indices/segments/types.ts#L46-L51', 'indices.shard_stores.IndicesShardStores': 'indices/shard_stores/types.ts#L25-L27', -'indices.shard_stores.Request': 'indices/shard_stores/IndicesShardStoresRequest.ts#L24-L71', +'indices.shard_stores.Request': 'indices/shard_stores/IndicesShardStoresRequest.ts#L24-L72', 'indices.shard_stores.Response': 'indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26', 'indices.shard_stores.ShardStore': 'indices/shard_stores/types.ts#L29-L36', 'indices.shard_stores.ShardStoreAllocation': 'indices/shard_stores/types.ts#L47-L51', @@ -1640,19 +1653,19 @@ 'indices.shard_stores.ShardStoreWrapper': 'indices/shard_stores/types.ts#L58-L60', 'indices.shrink.Request': 'indices/shrink/IndicesShrinkRequest.ts#L27-L107', 
'indices.shrink.Response': 'indices/shrink/IndicesShrinkResponse.ts#L22-L28', -'indices.simulate_index_template.Request': 'indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L24-L50', +'indices.simulate_index_template.Request': 'indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L24-L52', 'indices.simulate_index_template.Response': 'indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30', 'indices.simulate_template.Overlapping': 'indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42', -'indices.simulate_template.Request': 'indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L120', +'indices.simulate_template.Request': 'indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L121', 'indices.simulate_template.Response': 'indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31', 'indices.simulate_template.Template': 'indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37', -'indices.split.Request': 'indices/split/IndicesSplitRequest.ts#L27-L98', +'indices.split.Request': 'indices/split/IndicesSplitRequest.ts#L27-L107', 'indices.split.Response': 'indices/split/IndicesSplitResponse.ts#L22-L28', 'indices.stats.IndexMetadataState': 'indices/stats/types.ts#L225-L232', 'indices.stats.IndexStats': 'indices/stats/types.ts#L52-L93', 'indices.stats.IndicesStats': 'indices/stats/types.ts#L95-L110', 'indices.stats.MappingStats': 'indices/stats/types.ts#L186-L190', -'indices.stats.Request': 'indices/stats/IndicesStatsRequest.ts#L29-L94', +'indices.stats.Request': 'indices/stats/IndicesStatsRequest.ts#L29-L95', 'indices.stats.Response': 'indices/stats/IndicesStatsResponse.ts#L24-L30', 'indices.stats.ShardCommit': 'indices/stats/types.ts#L112-L117', 'indices.stats.ShardFileSizeInfo': 'indices/stats/types.ts#L124-L131', @@ -1665,7 +1678,7 @@ 'indices.stats.ShardSequenceNumber': 'indices/stats/types.ts#L176-L180', 'indices.stats.ShardStats': 
'indices/stats/types.ts#L192-L223', 'indices.stats.ShardsTotalStats': 'indices/stats/types.ts#L182-L184', -'indices.unfreeze.Request': 'indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L77', +'indices.unfreeze.Request': 'indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L79', 'indices.unfreeze.Response': 'indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25', 'indices.update_aliases.Action': 'indices/update_aliases/types.ts#L23-L39', 'indices.update_aliases.AddAction': 'indices/update_aliases/types.ts#L41-L95', @@ -1692,7 +1705,7 @@ 'inference.get.Response': 'inference/get/GetResponse.ts#L22-L26', 'inference.inference.Request': 'inference/inference/InferenceRequest.ts#L26-L66', 'inference.inference.Response': 'inference/inference/InferenceResponse.ts#L22-L24', -'inference.put.Request': 'inference/put/PutRequest.ts#L25-L44', +'inference.put.Request': 'inference/put/PutRequest.ts#L25-L54', 'inference.put.Response': 'inference/put/PutResponse.ts#L22-L24', 'ingest._types.AppendProcessor': 'ingest/_types/Processors.ts#L328-L343', 'ingest._types.AttachmentProcessor': 'ingest/_types/Processors.ts#L345-L386', @@ -1736,6 +1749,7 @@ 'ingest._types.Maxmind': 'ingest/_types/Database.ts#L55-L57', 'ingest._types.NetworkDirectionProcessor': 'ingest/_types/Processors.ts#L1230-L1264', 'ingest._types.Pipeline': 'ingest/_types/Pipeline.ts#L23-L51', +'ingest._types.PipelineConfig': 'ingest/_types/Pipeline.ts#L67-L81', 'ingest._types.PipelineProcessor': 'ingest/_types/Processors.ts#L1266-L1277', 'ingest._types.ProcessorBase': 'ingest/_types/Processors.ts#L303-L326', 'ingest._types.ProcessorContainer': 'ingest/_types/Processors.ts#L27-L301', @@ -1760,7 +1774,7 @@ 'ingest._types.Web': 'ingest/_types/Database.ts#L61-L61', 'ingest.delete_geoip_database.Request': 'ingest/delete_geoip_database/DeleteGeoipDatabaseRequest.ts#L24-L49', 'ingest.delete_geoip_database.Response': 'ingest/delete_geoip_database/DeleteGeoipDatabaseResponse.ts#L22-L24', -'ingest.delete_ip_location_database.Request': 
'ingest/delete_ip_location_database/DeleteIpLocationDatabaseRequest.ts#L24-L48', +'ingest.delete_ip_location_database.Request': 'ingest/delete_ip_location_database/DeleteIpLocationDatabaseRequest.ts#L24-L53', 'ingest.delete_ip_location_database.Response': 'ingest/delete_ip_location_database/DeleteIpLocationDatabaseResponse.ts#L22-L24', 'ingest.delete_pipeline.Request': 'ingest/delete_pipeline/DeletePipelineRequest.ts#L24-L54', 'ingest.delete_pipeline.Response': 'ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24', @@ -1773,7 +1787,7 @@ 'ingest.get_geoip_database.Request': 'ingest/get_geoip_database/GetGeoipDatabaseRequest.ts#L24-L47', 'ingest.get_geoip_database.Response': 'ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L25-L27', 'ingest.get_ip_location_database.DatabaseConfigurationMetadata': 'ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L28-L34', -'ingest.get_ip_location_database.Request': 'ingest/get_ip_location_database/GetIpLocationDatabaseRequest.ts#L24-L46', +'ingest.get_ip_location_database.Request': 'ingest/get_ip_location_database/GetIpLocationDatabaseRequest.ts#L24-L48', 'ingest.get_ip_location_database.Response': 'ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L24-L26', 'ingest.get_pipeline.Request': 'ingest/get_pipeline/GetPipelineRequest.ts#L24-L52', 'ingest.get_pipeline.Response': 'ingest/get_pipeline/GetPipelineResponse.ts#L23-L26', @@ -1781,7 +1795,7 @@ 'ingest.processor_grok.Response': 'ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24', 'ingest.put_geoip_database.Request': 'ingest/put_geoip_database/PutGeoipDatabaseRequest.ts#L25-L58', 'ingest.put_geoip_database.Response': 'ingest/put_geoip_database/PutGeoipDatabaseResponse.ts#L22-L24', -'ingest.put_ip_location_database.Request': 'ingest/put_ip_location_database/PutIpLocationDatabaseRequest.ts#L25-L51', +'ingest.put_ip_location_database.Request': 'ingest/put_ip_location_database/PutIpLocationDatabaseRequest.ts#L25-L55', 
'ingest.put_ip_location_database.Response': 'ingest/put_ip_location_database/PutIpLocationDatabaseResponse.ts#L22-L24', 'ingest.put_pipeline.Request': 'ingest/put_pipeline/PutPipelineRequest.ts#L25-L84', 'ingest.put_pipeline.Response': 'ingest/put_pipeline/PutPipelineResponse.ts#L22-L24', @@ -1810,26 +1824,26 @@ 'license.post.Response': 'license/post/PostLicenseResponse.ts#L23-L29', 'license.post_start_basic.Request': 'license/post_start_basic/StartBasicLicenseRequest.ts#L22-L40', 'license.post_start_basic.Response': 'license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31', -'license.post_start_trial.Request': 'license/post_start_trial/StartTrialLicenseRequest.ts#L22-L39', +'license.post_start_trial.Request': 'license/post_start_trial/StartTrialLicenseRequest.ts#L22-L40', 'license.post_start_trial.Response': 'license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29', -'logstash._types.Pipeline': 'logstash/_types/Pipeline.ts#L60-L92', +'logstash._types.Pipeline': 'logstash/_types/Pipeline.ts#L60-L91', 'logstash._types.PipelineMetadata': 'logstash/_types/Pipeline.ts#L23-L26', 'logstash._types.PipelineSettings': 'logstash/_types/Pipeline.ts#L28-L59', -'logstash.delete_pipeline.Request': 'logstash/delete_pipeline/LogstashDeletePipelineRequest.ts#L23-L37', -'logstash.get_pipeline.Request': 'logstash/get_pipeline/LogstashGetPipelineRequest.ts#L23-L37', +'logstash.delete_pipeline.Request': 'logstash/delete_pipeline/LogstashDeletePipelineRequest.ts#L23-L41', +'logstash.get_pipeline.Request': 'logstash/get_pipeline/LogstashGetPipelineRequest.ts#L23-L40', 'logstash.get_pipeline.Response': 'logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27', -'logstash.put_pipeline.Request': 'logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L39', -'migration.deprecations.Deprecation': 'migration/deprecations/types.ts#L32-L40', +'logstash.put_pipeline.Request': 'logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L45', +'migration.deprecations.Deprecation': 
'migration/deprecations/types.ts#L32-L47', 'migration.deprecations.DeprecationLevel': 'migration/deprecations/types.ts#L23-L30', -'migration.deprecations.Request': 'migration/deprecations/DeprecationInfoRequest.ts#L23-L32', -'migration.deprecations.Response': 'migration/deprecations/DeprecationInfoResponse.ts#L23-L31', +'migration.deprecations.Request': 'migration/deprecations/DeprecationInfoRequest.ts#L23-L39', +'migration.deprecations.Response': 'migration/deprecations/DeprecationInfoResponse.ts#L23-L45', 'migration.get_feature_upgrade_status.MigrationFeature': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42', 'migration.get_feature_upgrade_status.MigrationFeatureIndexInfo': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48', 'migration.get_feature_upgrade_status.MigrationStatus': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35', -'migration.get_feature_upgrade_status.Request': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusRequest.ts#L22-L27', +'migration.get_feature_upgrade_status.Request': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusRequest.ts#L22-L35', 'migration.get_feature_upgrade_status.Response': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28', 'migration.post_feature_upgrade.MigrationFeature': 'migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29', -'migration.post_feature_upgrade.Request': 'migration/post_feature_upgrade/PostFeatureUpgradeRequest.ts#L22-L27', +'migration.post_feature_upgrade.Request': 'migration/post_feature_upgrade/PostFeatureUpgradeRequest.ts#L22-L36', 'migration.post_feature_upgrade.Response': 'migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25', 'ml._types.AdaptiveAllocationsSettings': 'ml/_types/TrainedModel.ts#L109-L113', 'ml._types.AnalysisConfig': 'ml/_types/Analysis.ts#L29-L77', @@ -2042,7 +2056,7 @@ 
'ml.evaluate_data_frame.DataframeOutlierDetectionSummary': 'ml/evaluate_data_frame/types.ts#L24-L42', 'ml.evaluate_data_frame.DataframeRegressionSummary': 'ml/evaluate_data_frame/types.ts#L68-L85', 'ml.evaluate_data_frame.Request': 'ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L53', -'ml.evaluate_data_frame.Response': 'ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L33', +'ml.evaluate_data_frame.Response': 'ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L44', 'ml.explain_data_frame_analytics.Request': 'ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L108', 'ml.explain_data_frame_analytics.Response': 'ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32', 'ml.flush_job.Request': 'ml/flush_job/MlFlushJobRequest.ts#L24-L100', @@ -2176,11 +2190,11 @@ 'ml.update_trained_model_deployment.Response': 'ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentResponse.ts#L22-L26', 'ml.upgrade_job_snapshot.Request': 'ml/upgrade_job_snapshot/MlUpgradeJobSnapshotRequest.ts#L24-L65', 'ml.upgrade_job_snapshot.Response': 'ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31', -'ml.validate.Request': 'ml/validate/MlValidateJobRequest.ts#L27-L44', +'ml.validate.Request': 'ml/validate/MlValidateJobRequest.ts#L27-L46', 'ml.validate.Response': 'ml/validate/MlValidateJobResponse.ts#L22-L24', -'ml.validate_detector.Request': 'ml/validate_detector/MlValidateDetectorRequest.ts#L23-L31', +'ml.validate_detector.Request': 'ml/validate_detector/MlValidateDetectorRequest.ts#L23-L33', 'ml.validate_detector.Response': 'ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24', -'monitoring.bulk.Request': 'monitoring/bulk/BulkMonitoringRequest.ts#L24-L59', +'monitoring.bulk.Request': 'monitoring/bulk/BulkMonitoringRequest.ts#L24-L61', 'monitoring.bulk.Response': 'monitoring/bulk/BulkMonitoringResponse.ts#L23-L32', 'nodes._types.AdaptiveSelection': 'nodes/_types/Stats.ts#L439-L468', 
'nodes._types.Breaker': 'nodes/_types/Stats.ts#L470-L495', @@ -2243,10 +2257,10 @@ 'nodes._types.TimeHttpHistogram': 'nodes/_types/Stats.ts#L708-L712', 'nodes._types.Transport': 'nodes/_types/Stats.ts#L1118-L1161', 'nodes._types.TransportHistogram': 'nodes/_types/Stats.ts#L1163-L1177', -'nodes.clear_repositories_metering_archive.Request': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveRequest.ts#L24-L45', -'nodes.clear_repositories_metering_archive.Response': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L36-L38', -'nodes.clear_repositories_metering_archive.ResponseBase': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L25-L34', -'nodes.get_repositories_metering_info.Request': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoRequest.ts#L23-L42', +'nodes.clear_repositories_metering_archive.Request': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveRequest.ts#L24-L46', +'nodes.clear_repositories_metering_archive.Response': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L37-L39', +'nodes.clear_repositories_metering_archive.ResponseBase': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L25-L35', +'nodes.get_repositories_metering_info.Request': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoRequest.ts#L23-L43', 'nodes.get_repositories_metering_info.Response': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38', 'nodes.get_repositories_metering_info.ResponseBase': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L25-L34', 'nodes.hot_threads.Request': 'nodes/hot_threads/NodesHotThreadsRequest.ts#L25-L85', @@ -2313,29 +2327,29 @@ 'nodes.usage.Request': 'nodes/usage/NodesUsageRequest.ts#L24-L50', 'nodes.usage.Response': 'nodes/usage/NodesUsageResponse.ts#L30-L32', 
'nodes.usage.ResponseBase': 'nodes/usage/NodesUsageResponse.ts#L25-L28', -'query_rules._types.QueryRule': 'query_rules/_types/QueryRuleset.ts#L36-L42', -'query_rules._types.QueryRuleActions': 'query_rules/_types/QueryRuleset.ts#L70-L73', -'query_rules._types.QueryRuleCriteria': 'query_rules/_types/QueryRuleset.ts#L49-L53', -'query_rules._types.QueryRuleCriteriaType': 'query_rules/_types/QueryRuleset.ts#L55-L68', -'query_rules._types.QueryRuleType': 'query_rules/_types/QueryRuleset.ts#L44-L47', +'query_rules._types.QueryRule': 'query_rules/_types/QueryRuleset.ts#L36-L58', +'query_rules._types.QueryRuleActions': 'query_rules/_types/QueryRuleset.ts#L110-L126', +'query_rules._types.QueryRuleCriteria': 'query_rules/_types/QueryRuleset.ts#L65-L93', +'query_rules._types.QueryRuleCriteriaType': 'query_rules/_types/QueryRuleset.ts#L95-L108', +'query_rules._types.QueryRuleType': 'query_rules/_types/QueryRuleset.ts#L60-L63', 'query_rules._types.QueryRuleset': 'query_rules/_types/QueryRuleset.ts#L25-L34', -'query_rules.delete_rule.Request': 'query_rules/delete_rule/QueryRuleDeleteRequest.ts#L22-L41', +'query_rules.delete_rule.Request': 'query_rules/delete_rule/QueryRuleDeleteRequest.ts#L22-L44', 'query_rules.delete_rule.Response': 'query_rules/delete_rule/QueryRuleDeleteResponse.ts#L22-L24', -'query_rules.delete_ruleset.Request': 'query_rules/delete_ruleset/QueryRulesetDeleteRequest.ts#L22-L35', +'query_rules.delete_ruleset.Request': 'query_rules/delete_ruleset/QueryRulesetDeleteRequest.ts#L22-L39', 'query_rules.delete_ruleset.Response': 'query_rules/delete_ruleset/QueryRulesetDeleteResponse.ts#L22-L24', -'query_rules.get_rule.Request': 'query_rules/get_rule/QueryRuleGetRequest.ts#L22-L42', +'query_rules.get_rule.Request': 'query_rules/get_rule/QueryRuleGetRequest.ts#L22-L44', 'query_rules.get_rule.Response': 'query_rules/get_rule/QueryRuleGetResponse.ts#L22-L24', -'query_rules.get_ruleset.Request': 'query_rules/get_ruleset/QueryRulesetGetRequest.ts#L22-L36', 
+'query_rules.get_ruleset.Request': 'query_rules/get_ruleset/QueryRulesetGetRequest.ts#L22-L38', 'query_rules.get_ruleset.Response': 'query_rules/get_ruleset/QueryRulesetGetResponse.ts#L22-L24', -'query_rules.list_rulesets.QueryRulesetListItem': 'query_rules/list_rulesets/types.ts#L23-L42', -'query_rules.list_rulesets.Request': 'query_rules/list_rulesets/QueryRulesetListRequest.ts#L22-L40', +'query_rules.list_rulesets.QueryRulesetListItem': 'query_rules/list_rulesets/types.ts#L23-L44', +'query_rules.list_rulesets.Request': 'query_rules/list_rulesets/QueryRulesetListRequest.ts#L22-L43', 'query_rules.list_rulesets.Response': 'query_rules/list_rulesets/QueryRulesetListResponse.ts#L23-L28', -'query_rules.put_rule.Request': 'query_rules/put_rule/QueryRulePutRequest.ts#L28-L57', +'query_rules.put_rule.Request': 'query_rules/put_rule/QueryRulePutRequest.ts#L28-L73', 'query_rules.put_rule.Response': 'query_rules/put_rule/QueryRulePutResponse.ts#L22-L26', -'query_rules.put_ruleset.Request': 'query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L44', +'query_rules.put_ruleset.Request': 'query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L53', 'query_rules.put_ruleset.Response': 'query_rules/put_ruleset/QueryRulesetPutResponse.ts#L22-L26', 'query_rules.test.QueryRulesetMatchedRule': 'query_rules/test/QueryRulesetTestResponse.ts#L30-L39', -'query_rules.test.Request': 'query_rules/test/QueryRulesetTestRequest.ts#L24-L45', +'query_rules.test.Request': 'query_rules/test/QueryRulesetTestRequest.ts#L24-L51', 'query_rules.test.Response': 'query_rules/test/QueryRulesetTestResponse.ts#L23-L28', 'rollup._types.DateHistogramGrouping': 'rollup/_types/Groupings.ts#L42-L73', 'rollup._types.FieldMetric': 'rollup/_types/Metric.ts#L30-L35', @@ -2343,66 +2357,71 @@ 'rollup._types.HistogramGrouping': 'rollup/_types/Groupings.ts#L84-L97', 'rollup._types.Metric': 'rollup/_types/Metric.ts#L22-L28', 'rollup._types.TermsGrouping': 'rollup/_types/Groupings.ts#L75-L82', 
-'rollup.delete_job.Request': 'rollup/delete_job/DeleteRollupJobRequest.ts#L23-L35', +'rollup.delete_job.Request': 'rollup/delete_job/DeleteRollupJobRequest.ts#L23-L61', 'rollup.delete_job.Response': 'rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27', -'rollup.get_jobs.IndexingJobState': 'rollup/get_jobs/types.ts#L66-L72', -'rollup.get_jobs.Request': 'rollup/get_jobs/GetRollupJobRequest.ts#L23-L36', +'rollup.get_jobs.IndexingJobState': 'rollup/get_jobs/types.ts#L77-L83', +'rollup.get_jobs.Request': 'rollup/get_jobs/GetRollupJobRequest.ts#L23-L44', 'rollup.get_jobs.Response': 'rollup/get_jobs/GetRollupJobResponse.ts#L22-L24', -'rollup.get_jobs.RollupJob': 'rollup/get_jobs/types.ts#L28-L32', -'rollup.get_jobs.RollupJobConfiguration': 'rollup/get_jobs/types.ts#L34-L43', -'rollup.get_jobs.RollupJobStats': 'rollup/get_jobs/types.ts#L45-L58', -'rollup.get_jobs.RollupJobStatus': 'rollup/get_jobs/types.ts#L60-L64', -'rollup.get_rollup_caps.Request': 'rollup/get_rollup_caps/GetRollupCapabilitiesRequest.ts#L23-L36', +'rollup.get_jobs.RollupJob': 'rollup/get_jobs/types.ts#L28-L43', +'rollup.get_jobs.RollupJobConfiguration': 'rollup/get_jobs/types.ts#L45-L54', +'rollup.get_jobs.RollupJobStats': 'rollup/get_jobs/types.ts#L56-L69', +'rollup.get_jobs.RollupJobStatus': 'rollup/get_jobs/types.ts#L71-L75', +'rollup.get_rollup_caps.Request': 'rollup/get_rollup_caps/GetRollupCapabilitiesRequest.ts#L23-L47', 'rollup.get_rollup_caps.Response': 'rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27', -'rollup.get_rollup_caps.RollupCapabilities': 'rollup/get_rollup_caps/types.ts#L24-L26', -'rollup.get_rollup_caps.RollupCapabilitySummary': 'rollup/get_rollup_caps/types.ts#L28-L33', -'rollup.get_rollup_caps.RollupFieldSummary': 'rollup/get_rollup_caps/types.ts#L35-L39', +'rollup.get_rollup_caps.RollupCapabilities': 'rollup/get_rollup_caps/types.ts#L24-L29', +'rollup.get_rollup_caps.RollupCapabilitySummary': 'rollup/get_rollup_caps/types.ts#L31-L36', 
+'rollup.get_rollup_caps.RollupFieldSummary': 'rollup/get_rollup_caps/types.ts#L38-L42', 'rollup.get_rollup_index_caps.IndexCapabilities': 'rollup/get_rollup_index_caps/types.ts#L24-L26', -'rollup.get_rollup_index_caps.Request': 'rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesRequest.ts#L23-L36', +'rollup.get_rollup_index_caps.Request': 'rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesRequest.ts#L23-L44', 'rollup.get_rollup_index_caps.Response': 'rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27', 'rollup.get_rollup_index_caps.RollupJobSummary': 'rollup/get_rollup_index_caps/types.ts#L28-L33', 'rollup.get_rollup_index_caps.RollupJobSummaryField': 'rollup/get_rollup_index_caps/types.ts#L35-L39', -'rollup.put_job.Request': 'rollup/put_job/CreateRollupJobRequest.ts#L27-L89', +'rollup.put_job.Request': 'rollup/put_job/CreateRollupJobRequest.ts#L27-L99', 'rollup.put_job.Response': 'rollup/put_job/CreateRollupJobResponse.ts#L22-L24', -'rollup.rollup_search.Request': 'rollup/rollup_search/RollupSearchRequest.ts#L27-L57', +'rollup.rollup_search.Request': 'rollup/rollup_search/RollupSearchRequest.ts#L27-L103', 'rollup.rollup_search.Response': 'rollup/rollup_search/RollupSearchResponse.ts#L27-L36', -'rollup.start_job.Request': 'rollup/start_job/StartRollupJobRequest.ts#L23-L35', +'rollup.start_job.Request': 'rollup/start_job/StartRollupJobRequest.ts#L23-L40', 'rollup.start_job.Response': 'rollup/start_job/StartRollupJobResponse.ts#L20-L22', -'rollup.stop_job.Request': 'rollup/stop_job/StopRollupJobRequest.ts#L24-L50', +'rollup.stop_job.Request': 'rollup/stop_job/StopRollupJobRequest.ts#L24-L67', 'rollup.stop_job.Response': 'rollup/stop_job/StopRollupJobResponse.ts#L20-L22', 'search_application._types.AnalyticsCollection': 'search_application/_types/BehavioralAnalytics.ts#L22-L27', 'search_application._types.EventDataStream': 'search_application/_types/BehavioralAnalytics.ts#L29-L31', +'search_application._types.EventType': 
'search_application/_types/AnalyticsEvent.ts#L22-L26', 'search_application._types.SearchApplication': 'search_application/_types/SearchApplication.ts#L24-L45', 'search_application._types.SearchApplicationTemplate': 'search_application/_types/SearchApplication.ts#L47-L52', -'search_application.delete.Request': 'search_application/delete/SearchApplicationsDeleteRequest.ts#L22-L36', +'search_application.delete.Request': 'search_application/delete/SearchApplicationsDeleteRequest.ts#L22-L38', 'search_application.delete.Response': 'search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L24', 'search_application.delete_behavioral_analytics.Request': 'search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteRequest.ts#L22-L37', 'search_application.delete_behavioral_analytics.Response': 'search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L24', -'search_application.get.Request': 'search_application/get/SearchApplicationsGetRequest.ts#L22-L35', +'search_application.get.Request': 'search_application/get/SearchApplicationsGetRequest.ts#L22-L36', 'search_application.get.Response': 'search_application/get/SearchApplicationsGetResponse.ts#L22-L24', 'search_application.get_behavioral_analytics.Request': 'search_application/get_behavioral_analytics/BehavioralAnalyticsGetRequest.ts#L22-L36', 'search_application.get_behavioral_analytics.Response': 'search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L27', -'search_application.list.Request': 'search_application/list/SearchApplicationsListRequest.ts#L22-L44', +'search_application.list.Request': 'search_application/list/SearchApplicationsListRequest.ts#L22-L46', 'search_application.list.Response': 'search_application/list/SearchApplicationsListResponse.ts#L24-L29', 'search_application.list.SearchApplicationListItem': 'search_application/list/SearchApplicationsListResponse.ts#L31-L48', -'search_application.put.Request': 
'search_application/put/SearchApplicationsPutRequest.ts#L23-L48', +'search_application.post_behavioral_analytics_event.Request': 'search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostRequest.ts#L24-L51', +'search_application.post_behavioral_analytics_event.Response': 'search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostResponse.ts#L22-L47', +'search_application.put.Request': 'search_application/put/SearchApplicationsPutRequest.ts#L23-L50', 'search_application.put.Response': 'search_application/put/SearchApplicationsPutResponse.ts#L22-L26', 'search_application.put_behavioral_analytics.AnalyticsAcknowledgeResponseBase': 'search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L27-L32', 'search_application.put_behavioral_analytics.Request': 'search_application/put_behavioral_analytics/BehavioralAnalyticsPutRequest.ts#L22-L36', 'search_application.put_behavioral_analytics.Response': 'search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L23-L25', +'search_application.render_query.Request': 'search_application/render_query/SearchApplicationsRenderQueryRequest.ts#L24-L48', +'search_application.render_query.Response': 'search_application/render_query/SearchApplicationsRenderQueryResponse.ts#L20-L22', 'search_application.search.Request': 'search_application/search/SearchApplicationsSearchRequest.ts#L24-L54', 'search_application.search.Response': 'search_application/search/SearchApplicationsSearchResponse.ts#L22-L24', 'searchable_snapshots._types.StatsLevel': 'searchable_snapshots/_types/stats.ts#L20-L24', 'searchable_snapshots.cache_stats.Node': 'searchable_snapshots/cache_stats/Response.ts#L30-L32', -'searchable_snapshots.cache_stats.Request': 'searchable_snapshots/cache_stats/Request.ts#L24-L35', +'searchable_snapshots.cache_stats.Request': 'searchable_snapshots/cache_stats/Request.ts#L24-L38', 'searchable_snapshots.cache_stats.Response': 
'searchable_snapshots/cache_stats/Response.ts#L24-L28', 'searchable_snapshots.cache_stats.Shared': 'searchable_snapshots/cache_stats/Response.ts#L34-L43', -'searchable_snapshots.clear_cache.Request': 'searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheRequest.ts#L23-L38', +'searchable_snapshots.clear_cache.Request': 'searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheRequest.ts#L23-L42', 'searchable_snapshots.clear_cache.Response': 'searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25', 'searchable_snapshots.mount.MountedSnapshot': 'searchable_snapshots/mount/types.ts#L23-L27', -'searchable_snapshots.mount.Request': 'searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L49', +'searchable_snapshots.mount.Request': 'searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L55', 'searchable_snapshots.mount.Response': 'searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26', -'searchable_snapshots.stats.Request': 'searchable_snapshots/stats/SearchableSnapshotsStatsRequest.ts#L24-L35', +'searchable_snapshots.stats.Request': 'searchable_snapshots/stats/SearchableSnapshotsStatsRequest.ts#L24-L38', 'searchable_snapshots.stats.Response': 'searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27', 'security._types.Access': 'security/_types/Access.ts#L22-L31', 'security._types.ApiKey': 'security/_types/ApiKey.ts#L27-L113', @@ -2454,6 +2473,8 @@ 'security.bulk_delete_role.Response': 'security/bulk_delete_role/SecurityBulkDeleteRoleResponse.ts#L22-L37', 'security.bulk_put_role.Request': 'security/bulk_put_role/SecurityBulkPutRoleRequest.ts#L25-L45', 'security.bulk_put_role.Response': 'security/bulk_put_role/SecurityBulkPutRoleResponse.ts#L22-L41', +'security.bulk_update_api_keys.Request': 'security/bulk_update_api_keys/SecurityBulkUpdateApiKeysRequest.ts#L26-L77', +'security.bulk_update_api_keys.Response': 
'security/bulk_update_api_keys/SecurityBulkUpdateApiKeysResponse.ts#L22-L28', 'security.change_password.Request': 'security/change_password/SecurityChangePasswordRequest.ts#L23-L54', 'security.change_password.Response': 'security/change_password/SecurityChangePasswordResponse.ts#L20-L22', 'security.clear_api_key_cache.Request': 'security/clear_api_key_cache/SecurityClearApiKeyCacheRequest.ts#L23-L42', @@ -2473,6 +2494,10 @@ 'security.create_service_token.Request': 'security/create_service_token/CreateServiceTokenRequest.ts#L23-L41', 'security.create_service_token.Response': 'security/create_service_token/CreateServiceTokenResponse.ts#L22-L27', 'security.create_service_token.Token': 'security/create_service_token/types.ts#L22-L25', +'security.delegate_pki.Authentication': 'security/delegate_pki/SecurityDelegatePkiResponse.ts#L43-L55', +'security.delegate_pki.AuthenticationRealm': 'security/delegate_pki/SecurityDelegatePkiResponse.ts#L57-L61', +'security.delegate_pki.Request': 'security/delegate_pki/SecurityDelegatePkiRequest.ts#L22-L50', +'security.delegate_pki.Response': 'security/delegate_pki/SecurityDelegatePkiResponse.ts#L24-L41', 'security.delete_privileges.FoundStatus': 'security/delete_privileges/types.ts#L20-L22', 'security.delete_privileges.Request': 'security/delete_privileges/SecurityDeletePrivilegesRequest.ts#L23-L38', 'security.delete_privileges.Response': 'security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L26', @@ -2544,6 +2569,12 @@ 'security.invalidate_api_key.Response': 'security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L30', 'security.invalidate_token.Request': 'security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L43', 'security.invalidate_token.Response': 'security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L30', +'security.oidc_authenticate.Request': 'security/oidc_authenticate/Request.ts#L22-L54', +'security.oidc_authenticate.Response': 'security/oidc_authenticate/Response.ts#L22-L41', 
+'security.oidc_logout.Request': 'security/oidc_logout/Request.ts#L22-L45', +'security.oidc_logout.Response': 'security/oidc_logout/Response.ts#L20-L27', +'security.oidc_prepare_authentication.Request': 'security/oidc_prepare_authentication/Request.ts#L22-L64', +'security.oidc_prepare_authentication.Response': 'security/oidc_prepare_authentication/Response.ts#L20-L30', 'security.put_privileges.Actions': 'security/put_privileges/types.ts#L22-L27', 'security.put_privileges.Request': 'security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L38', 'security.put_privileges.Response': 'security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L26', @@ -2589,17 +2620,17 @@ 'security.update_user_profile_data.Request': 'security/update_user_profile_data/Request.ts#L27-L72', 'security.update_user_profile_data.Response': 'security/update_user_profile_data/Response.ts#L22-L24', 'shutdown._types.Type': 'shutdown/_types/types.ts#L20-L24', -'shutdown.delete_node.Request': 'shutdown/delete_node/ShutdownDeleteNodeRequest.ts#L24-L44', +'shutdown.delete_node.Request': 'shutdown/delete_node/ShutdownDeleteNodeRequest.ts#L24-L55', 'shutdown.delete_node.Response': 'shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24', 'shutdown.get_node.NodeShutdownStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38', 'shutdown.get_node.PersistentTaskStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58', 'shutdown.get_node.PluginsStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62', -'shutdown.get_node.Request': 'shutdown/get_node/ShutdownGetNodeRequest.ts#L24-L44', +'shutdown.get_node.Request': 'shutdown/get_node/ShutdownGetNodeRequest.ts#L24-L54', 'shutdown.get_node.Response': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27', 'shutdown.get_node.ShardMigrationStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54', 'shutdown.get_node.ShutdownStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50', 'shutdown.get_node.ShutdownType': 
'shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43', -'shutdown.put_node.Request': 'shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L76', +'shutdown.put_node.Request': 'shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L102', 'shutdown.put_node.Response': 'shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24', 'slm._types.Configuration': 'slm/_types/SnapshotLifecycle.ts#L99-L129', 'slm._types.InProgress': 'slm/_types/SnapshotLifecycle.ts#L131-L136', @@ -2608,23 +2639,23 @@ 'slm._types.Retention': 'slm/_types/SnapshotLifecycle.ts#L84-L97', 'slm._types.SnapshotLifecycle': 'slm/_types/SnapshotLifecycle.ts#L38-L49', 'slm._types.Statistics': 'slm/_types/SnapshotLifecycle.ts#L51-L74', -'slm.delete_lifecycle.Request': 'slm/delete_lifecycle/DeleteSnapshotLifecycleRequest.ts#L23-L32', +'slm.delete_lifecycle.Request': 'slm/delete_lifecycle/DeleteSnapshotLifecycleRequest.ts#L24-L49', 'slm.delete_lifecycle.Response': 'slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24', -'slm.execute_lifecycle.Request': 'slm/execute_lifecycle/ExecuteSnapshotLifecycleRequest.ts#L23-L32', +'slm.execute_lifecycle.Request': 'slm/execute_lifecycle/ExecuteSnapshotLifecycleRequest.ts#L24-L49', 'slm.execute_lifecycle.Response': 'slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24', -'slm.execute_retention.Request': 'slm/execute_retention/ExecuteRetentionRequest.ts#L22-L27', +'slm.execute_retention.Request': 'slm/execute_retention/ExecuteRetentionRequest.ts#L23-L45', 'slm.execute_retention.Response': 'slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24', -'slm.get_lifecycle.Request': 'slm/get_lifecycle/GetSnapshotLifecycleRequest.ts#L23-L32', +'slm.get_lifecycle.Request': 'slm/get_lifecycle/GetSnapshotLifecycleRequest.ts#L24-L51', 'slm.get_lifecycle.Response': 'slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27', -'slm.get_stats.Request': 'slm/get_stats/GetSnapshotLifecycleStatsRequest.ts#L22-L27', +'slm.get_stats.Request': 
'slm/get_stats/GetSnapshotLifecycleStatsRequest.ts#L23-L44', 'slm.get_stats.Response': 'slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36', -'slm.get_status.Request': 'slm/get_status/GetSnapshotLifecycleManagementStatusRequest.ts#L22-L27', +'slm.get_status.Request': 'slm/get_status/GetSnapshotLifecycleManagementStatusRequest.ts#L23-L43', 'slm.get_status.Response': 'slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24', -'slm.put_lifecycle.Request': 'slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L72', +'slm.put_lifecycle.Request': 'slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L78', 'slm.put_lifecycle.Response': 'slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24', -'slm.start.Request': 'slm/start/StartSnapshotLifecycleManagementRequest.ts#L22-L27', +'slm.start.Request': 'slm/start/StartSnapshotLifecycleManagementRequest.ts#L23-L45', 'slm.start.Response': 'slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24', -'slm.stop.Request': 'slm/stop/StopSnapshotLifecycleManagementRequest.ts#L22-L27', +'slm.stop.Request': 'slm/stop/StopSnapshotLifecycleManagementRequest.ts#L23-L49', 'slm.stop.Response': 'slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24', 'snapshot._types.AzureRepository': 'snapshot/_types/SnapshotRepository.ts#L40-L43', 'snapshot._types.AzureRepositorySettings': 'snapshot/_types/SnapshotRepository.ts#L77-L83', @@ -2656,32 +2687,32 @@ 'snapshot._types.SourceOnlyRepositorySettings': 'snapshot/_types/SnapshotRepository.ts#L117-L124', 'snapshot._types.Status': 'snapshot/_types/SnapshotStatus.ts#L26-L35', 'snapshot.cleanup_repository.CleanupRepositoryResults': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34', -'snapshot.cleanup_repository.Request': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryRequest.ts#L24-L49', +'snapshot.cleanup_repository.Request': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryRequest.ts#L24-L53', 
'snapshot.cleanup_repository.Response': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27', -'snapshot.clone.Request': 'snapshot/clone/SnapshotCloneRequest.ts#L24-L42', +'snapshot.clone.Request': 'snapshot/clone/SnapshotCloneRequest.ts#L24-L46', 'snapshot.clone.Response': 'snapshot/clone/SnapshotCloneResponse.ts#L22-L24', -'snapshot.create.Request': 'snapshot/create/SnapshotCreateRequest.ts#L24-L81', +'snapshot.create.Request': 'snapshot/create/SnapshotCreateRequest.ts#L24-L86', 'snapshot.create.Response': 'snapshot/create/SnapshotCreateResponse.ts#L22-L35', -'snapshot.create_repository.Request': 'snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L42', +'snapshot.create_repository.Request': 'snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L48', 'snapshot.create_repository.Response': 'snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24', -'snapshot.delete.Request': 'snapshot/delete/SnapshotDeleteRequest.ts#L24-L37', +'snapshot.delete.Request': 'snapshot/delete/SnapshotDeleteRequest.ts#L24-L40', 'snapshot.delete.Response': 'snapshot/delete/SnapshotDeleteResponse.ts#L22-L24', -'snapshot.delete_repository.Request': 'snapshot/delete_repository/SnapshotDeleteRepositoryRequest.ts#L24-L38', +'snapshot.delete_repository.Request': 'snapshot/delete_repository/SnapshotDeleteRepositoryRequest.ts#L24-L43', 'snapshot.delete_repository.Response': 'snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24', -'snapshot.get.Request': 'snapshot/get/SnapshotGetRequest.ts#L27-L127', +'snapshot.get.Request': 'snapshot/get/SnapshotGetRequest.ts#L27-L130', 'snapshot.get.Response': 'snapshot/get/SnapshotGetResponse.ts#L25-L42', 'snapshot.get.SnapshotResponseItem': 'snapshot/get/SnapshotGetResponse.ts#L44-L48', -'snapshot.get_repository.Request': 'snapshot/get_repository/SnapshotGetRepositoryRequest.ts#L24-L38', +'snapshot.get_repository.Request': 
'snapshot/get_repository/SnapshotGetRepositoryRequest.ts#L24-L41', 'snapshot.get_repository.Response': 'snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25', -'snapshot.repository_verify_integrity.Request': 'snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityRequest.ts#L24-L43', +'snapshot.repository_verify_integrity.Request': 'snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityRequest.ts#L24-L73', 'snapshot.repository_verify_integrity.Response': 'snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityResponse.ts#L22-L24', -'snapshot.restore.Request': 'snapshot/restore/SnapshotRestoreRequest.ts#L25-L51', +'snapshot.restore.Request': 'snapshot/restore/SnapshotRestoreRequest.ts#L25-L72', 'snapshot.restore.Response': 'snapshot/restore/SnapshotRestoreResponse.ts#L23-L28', 'snapshot.restore.SnapshotRestore': 'snapshot/restore/SnapshotRestoreResponse.ts#L30-L34', -'snapshot.status.Request': 'snapshot/status/SnapshotStatusRequest.ts#L24-L38', +'snapshot.status.Request': 'snapshot/status/SnapshotStatusRequest.ts#L24-L51', 'snapshot.status.Response': 'snapshot/status/SnapshotStatusResponse.ts#L22-L24', 'snapshot.verify_repository.CompactNodeInfo': 'snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29', -'snapshot.verify_repository.Request': 'snapshot/verify_repository/SnapshotVerifyRepositoryRequest.ts#L24-L38', +'snapshot.verify_repository.Request': 'snapshot/verify_repository/SnapshotVerifyRepositoryRequest.ts#L24-L43', 'snapshot.verify_repository.Response': 'snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25', 'sql.Column': 'sql/types.ts#L23-L26', 'sql.clear_cursor.Request': 'sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L35', @@ -2721,18 +2752,26 @@ 'tasks._types.GroupBy': 'tasks/_types/GroupBy.ts#L20-L27', 'tasks._types.NodeTasks': 'tasks/_types/TaskListResponseBase.ts#L49-L57', 'tasks._types.ParentTaskInfo': 'tasks/_types/TaskListResponseBase.ts#L45-L47', 
-'tasks._types.TaskInfo': 'tasks/_types/TaskInfo.ts#L32-L47', +'tasks._types.TaskInfo': 'tasks/_types/TaskInfo.ts#L32-L58', 'tasks._types.TaskInfos': 'tasks/_types/TaskListResponseBase.ts#L40-L43', 'tasks._types.TaskListResponseBase': 'tasks/_types/TaskListResponseBase.ts#L26-L38', -'tasks.cancel.Request': 'tasks/cancel/CancelTasksRequest.ts#L23-L51', +'tasks.cancel.Request': 'tasks/cancel/CancelTasksRequest.ts#L23-L59', 'tasks.cancel.Response': 'tasks/cancel/CancelTasksResponse.ts#L22-L24', 'tasks.get.Request': 'tasks/get/GetTaskRequest.ts#L24-L52', 'tasks.get.Response': 'tasks/get/GetTaskResponse.ts#L24-L31', -'tasks.list.Request': 'tasks/list/ListTasksRequest.ts#L25-L72', +'tasks.list.Request': 'tasks/list/ListTasksRequest.ts#L25-L74', 'tasks.list.Response': 'tasks/list/ListTasksResponse.ts#L22-L24', +'text_structure._types.EcsCompatibilityType': 'text_structure/_types/Structure.ts#L40-L43', +'text_structure._types.FieldStat': 'text_structure/_types/Structure.ts#L23-L33', +'text_structure._types.FormatType': 'text_structure/_types/Structure.ts#L45-L50', +'text_structure._types.TopHit': 'text_structure/_types/Structure.ts#L35-L38', +'text_structure.find_field_structure.Request': 'text_structure/find_field_structure/FindFieldStructureRequest.ts#L26-L162', +'text_structure.find_field_structure.Response': 'text_structure/find_field_structure/FindFieldStructureResponse.ts#L31-L49', +'text_structure.find_message_structure.Request': 'text_structure/find_message_structure/FindMessageStructureRequest.ts#L25-L163', +'text_structure.find_message_structure.Response': 'text_structure/find_message_structure/FindMessageStructureResponse.ts#L31-L49', 'text_structure.test_grok_pattern.MatchedField': 'text_structure/test_grok_pattern/types.ts#L23-L27', 'text_structure.test_grok_pattern.MatchedText': 'text_structure/test_grok_pattern/types.ts#L29-L32', -'text_structure.test_grok_pattern.Request': 'text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L23-L44', 
+'text_structure.test_grok_pattern.Request': 'text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L23-L48', 'text_structure.test_grok_pattern.Response': 'text_structure/test_grok_pattern/TestGrokPatternResponse.ts#L22-L26', 'transform._types.Destination': 'transform/_types/Transform.ts#L34-L45', 'transform._types.Latest': 'transform/_types/Transform.ts#L47-L52', @@ -2771,7 +2810,7 @@ 'transform.stop_transform.Response': 'transform/stop_transform/StopTransformResponse.ts#L22-L24', 'transform.update_transform.Request': 'transform/update_transform/UpdateTransformRequest.ts#L31-L106', 'transform.update_transform.Response': 'transform/update_transform/UpdateTransformResponse.ts#L33-L51', -'transform.upgrade_transforms.Request': 'transform/upgrade_transforms/UpgradeTransformsRequest.ts#L23-L49', +'transform.upgrade_transforms.Request': 'transform/upgrade_transforms/UpgradeTransformsRequest.ts#L23-L57', 'transform.upgrade_transforms.Response': 'transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34', 'watcher._types.AcknowledgeState': 'watcher/_types/Action.ts#L109-L112', 'watcher._types.AcknowledgementOptions': 'watcher/_types/Action.ts#L103-L107', @@ -2865,42 +2904,42 @@ 'watcher._types.WatchStatus': 'watcher/_types/Watch.ts#L49-L56', 'watcher._types.WebhookAction': 'watcher/_types/Actions.ts#L293-L293', 'watcher._types.WebhookResult': 'watcher/_types/Actions.ts#L295-L298', -'watcher.ack_watch.Request': 'watcher/ack_watch/WatcherAckWatchRequest.ts#L23-L32', +'watcher.ack_watch.Request': 'watcher/ack_watch/WatcherAckWatchRequest.ts#L23-L40', 'watcher.ack_watch.Response': 'watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24', -'watcher.activate_watch.Request': 'watcher/activate_watch/WatcherActivateWatchRequest.ts#L23-L31', +'watcher.activate_watch.Request': 'watcher/activate_watch/WatcherActivateWatchRequest.ts#L23-L35', 'watcher.activate_watch.Response': 'watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24', 
-'watcher.deactivate_watch.Request': 'watcher/deactivate_watch/DeactivateWatchRequest.ts#L23-L31', +'watcher.deactivate_watch.Request': 'watcher/deactivate_watch/DeactivateWatchRequest.ts#L23-L35', 'watcher.deactivate_watch.Response': 'watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24', -'watcher.delete_watch.Request': 'watcher/delete_watch/DeleteWatchRequest.ts#L23-L31', +'watcher.delete_watch.Request': 'watcher/delete_watch/DeleteWatchRequest.ts#L23-L40', 'watcher.delete_watch.Response': 'watcher/delete_watch/DeleteWatchResponse.ts#L22-L24', -'watcher.execute_watch.Request': 'watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L79', +'watcher.execute_watch.Request': 'watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L86', 'watcher.execute_watch.Response': 'watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L25', 'watcher.execute_watch.WatchRecord': 'watcher/execute_watch/types.ts#L27-L39', -'watcher.get_watch.Request': 'watcher/get_watch/GetWatchRequest.ts#L23-L31', +'watcher.get_watch.Request': 'watcher/get_watch/GetWatchRequest.ts#L23-L33', 'watcher.get_watch.Response': 'watcher/get_watch/GetWatchResponse.ts#L24-L34', -'watcher.put_watch.Request': 'watcher/put_watch/WatcherPutWatchRequest.ts#L30-L53', +'watcher.put_watch.Request': 'watcher/put_watch/WatcherPutWatchRequest.ts#L30-L66', 'watcher.put_watch.Response': 'watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31', -'watcher.query_watches.Request': 'watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L48', +'watcher.query_watches.Request': 'watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L51', 'watcher.query_watches.Response': 'watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L28', -'watcher.start.Request': 'watcher/start/WatcherStartRequest.ts#L22-L26', +'watcher.start.Request': 'watcher/start/WatcherStartRequest.ts#L22-L29', 'watcher.start.Response': 'watcher/start/WatcherStartResponse.ts#L22-L24', -'watcher.stats.Request': 
'watcher/stats/WatcherStatsRequest.ts#L23-L45', +'watcher.stats.Request': 'watcher/stats/WatcherStatsRequest.ts#L23-L47', 'watcher.stats.Response': 'watcher/stats/WatcherStatsResponse.ts#L24-L32', 'watcher.stats.WatchRecordQueuedStats': 'watcher/stats/types.ts#L50-L52', 'watcher.stats.WatchRecordStats': 'watcher/stats/types.ts#L54-L60', 'watcher.stats.WatcherMetric': 'watcher/stats/types.ts#L42-L48', 'watcher.stats.WatcherNodeStats': 'watcher/stats/types.ts#L33-L40', 'watcher.stats.WatcherState': 'watcher/stats/types.ts#L26-L31', -'watcher.stop.Request': 'watcher/stop/WatcherStopRequest.ts#L22-L26', +'watcher.stop.Request': 'watcher/stop/WatcherStopRequest.ts#L22-L29', 'watcher.stop.Response': 'watcher/stop/WatcherStopResponse.ts#L22-L24', 'xpack.info.BuildInformation': 'xpack/info/types.ts#L24-L27', 'xpack.info.Feature': 'xpack/info/types.ts#L85-L90', 'xpack.info.Features': 'xpack/info/types.ts#L42-L83', 'xpack.info.MinimalLicenseInformation': 'xpack/info/types.ts#L34-L40', 'xpack.info.NativeCodeInformation': 'xpack/info/types.ts#L29-L32', -'xpack.info.Request': 'xpack/info/XPackInfoRequest.ts#L22-L42', +'xpack.info.Request': 'xpack/info/XPackInfoRequest.ts#L22-L47', 'xpack.info.Response': 'xpack/info/XPackInfoResponse.ts#L22-L29', -'xpack.info.XPackCategory': 'xpack/info/XPackInfoRequest.ts#L44-L48', +'xpack.info.XPackCategory': 'xpack/info/XPackInfoRequest.ts#L49-L53', 'xpack.usage.Analytics': 'xpack/usage/types.ts#L328-L330', 'xpack.usage.AnalyticsStatistics': 'xpack/usage/types.ts#L59-L69', 'xpack.usage.Archive': 'xpack/usage/types.ts#L46-L48', @@ -2945,7 +2984,7 @@ 'xpack.usage.Query': 'xpack/usage/types.ts#L257-L262', 'xpack.usage.Realm': 'xpack/usage/types.ts#L415-L424', 'xpack.usage.RealmCache': 'xpack/usage/types.ts#L264-L266', -'xpack.usage.Request': 'xpack/usage/XPackUsageRequest.ts#L23-L38', +'xpack.usage.Request': 'xpack/usage/XPackUsageRequest.ts#L23-L40', 'xpack.usage.Response': 'xpack/usage/XPackUsageResponse.ts#L43-L79', 'xpack.usage.RoleMapping': 
'xpack/usage/types.ts#L268-L271', 'xpack.usage.RuntimeFieldTypes': 'xpack/usage/types.ts#L273-L275', @@ -2972,10 +3011,10 @@ if (hash.length > 1) { hash = hash.substring(1); } - window.location = "https://github.com/elastic/elasticsearch-specification/tree/b8071e8a539550d8cbaee3bae954ed4cc98d8422/specification/" + (paths[hash] || ""); + window.location = "https://github.com/elastic/elasticsearch-specification/tree/2cf307e1760fa43857ef37e3b2ccd6e9a7ab5f03/specification/" + (paths[hash] || ""); - Please see the Elasticsearch API specification. + Please see the Elasticsearch API specification. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/IlmPolicy.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/IlmPolicy.java index f0c8ce473..74253d987 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/IlmPolicy.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/IlmPolicy.java @@ -86,6 +86,9 @@ public final Phases phases() { } /** + * Arbitrary metadata that is not automatically generated or used by + * Elasticsearch. + *

* API name: {@code _meta} */ public final Map meta() { @@ -153,6 +156,9 @@ public final Builder phases(Function> fn) } /** + * Arbitrary metadata that is not automatically generated or used by + * Elasticsearch. + *

* API name: {@code _meta} *

* Adds all entries of map to meta. @@ -163,6 +169,9 @@ public final Builder meta(Map map) { } /** + * Arbitrary metadata that is not automatically generated or used by + * Elasticsearch. + *

* API name: {@code _meta} *

* Adds an entry to meta. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersResponse.java index c1c1b5f4c..247be7022 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersResponse.java @@ -107,42 +107,58 @@ public final boolean dryRun() { } /** - * Required - API name: {@code removed_legacy_template} + * Required - The name of the legacy index template that was deleted. This + * information is missing if no legacy index templates were deleted. + *

+ * API name: {@code removed_legacy_template} */ public final String removedLegacyTemplate() { return this.removedLegacyTemplate; } /** - * Required - API name: {@code migrated_ilm_policies} + * Required - The ILM policies that were updated. + *

+ * API name: {@code migrated_ilm_policies} */ public final List migratedIlmPolicies() { return this.migratedIlmPolicies; } /** - * Required - API name: {@code migrated_indices} + * Required - The indices that were migrated to tier preference routing. + *

+ * API name: {@code migrated_indices} */ public final List migratedIndices() { return this.migratedIndices; } /** - * Required - API name: {@code migrated_legacy_templates} + * Required - The legacy index templates that were updated to not contain custom + * routing settings for the provided data attribute. + *

+ * API name: {@code migrated_legacy_templates} */ public final List migratedLegacyTemplates() { return this.migratedLegacyTemplates; } /** - * Required - API name: {@code migrated_composable_templates} + * Required - The composable index templates that were updated to not contain + * custom routing settings for the provided data attribute. + *

+ * API name: {@code migrated_composable_templates} */ public final List migratedComposableTemplates() { return this.migratedComposableTemplates; } /** - * Required - API name: {@code migrated_component_templates} + * Required - The component templates that were updated to not contain custom + * routing settings for the provided data attribute. + *

+ * API name: {@code migrated_component_templates} */ public final List migratedComponentTemplates() { return this.migratedComponentTemplates; @@ -255,7 +271,10 @@ public final Builder dryRun(boolean value) { } /** - * Required - API name: {@code removed_legacy_template} + * Required - The name of the legacy index template that was deleted. This + * information is missing if no legacy index templates were deleted. + *

+ * API name: {@code removed_legacy_template} */ public final Builder removedLegacyTemplate(String value) { this.removedLegacyTemplate = value; @@ -263,7 +282,9 @@ public final Builder removedLegacyTemplate(String value) { } /** - * Required - API name: {@code migrated_ilm_policies} + * Required - The ILM policies that were updated. + *

+ * API name: {@code migrated_ilm_policies} *

* Adds all elements of list to migratedIlmPolicies. */ @@ -273,7 +294,9 @@ public final Builder migratedIlmPolicies(List list) { } /** - * Required - API name: {@code migrated_ilm_policies} + * Required - The ILM policies that were updated. + *

+ * API name: {@code migrated_ilm_policies} *

* Adds one or more values to migratedIlmPolicies. */ @@ -283,7 +306,9 @@ public final Builder migratedIlmPolicies(String value, String... values) { } /** - * Required - API name: {@code migrated_indices} + * Required - The indices that were migrated to tier preference routing. + *

+ * API name: {@code migrated_indices} *

* Adds all elements of list to migratedIndices. */ @@ -293,7 +318,9 @@ public final Builder migratedIndices(List list) { } /** - * Required - API name: {@code migrated_indices} + * Required - The indices that were migrated to tier preference routing. + *

+ * API name: {@code migrated_indices} *

* Adds one or more values to migratedIndices. */ @@ -303,7 +330,10 @@ public final Builder migratedIndices(String value, String... values) { } /** - * Required - API name: {@code migrated_legacy_templates} + * Required - The legacy index templates that were updated to not contain custom + * routing settings for the provided data attribute. + *

+ * API name: {@code migrated_legacy_templates} *

* Adds all elements of list to * migratedLegacyTemplates. @@ -314,7 +344,10 @@ public final Builder migratedLegacyTemplates(List list) { } /** - * Required - API name: {@code migrated_legacy_templates} + * Required - The legacy index templates that were updated to not contain custom + * routing settings for the provided data attribute. + *

+ * API name: {@code migrated_legacy_templates} *

* Adds one or more values to migratedLegacyTemplates. */ @@ -324,7 +357,10 @@ public final Builder migratedLegacyTemplates(String value, String... values) { } /** - * Required - API name: {@code migrated_composable_templates} + * Required - The composable index templates that were updated to not contain + * custom routing settings for the provided data attribute. + *

+ * API name: {@code migrated_composable_templates} *

* Adds all elements of list to * migratedComposableTemplates. @@ -335,7 +371,10 @@ public final Builder migratedComposableTemplates(List list) { } /** - * Required - API name: {@code migrated_composable_templates} + * Required - The composable index templates that were updated to not contain + * custom routing settings for the provided data attribute. + *

+ * API name: {@code migrated_composable_templates} *

* Adds one or more values to migratedComposableTemplates. */ @@ -345,7 +384,10 @@ public final Builder migratedComposableTemplates(String value, String... values) } /** - * Required - API name: {@code migrated_component_templates} + * Required - The component templates that were updated to not contain custom + * routing settings for the provided data attribute. + *

+ * API name: {@code migrated_component_templates} *

* Adds all elements of list to * migratedComponentTemplates. @@ -356,7 +398,10 @@ public final Builder migratedComponentTemplates(List list) { } /** - * Required - API name: {@code migrated_component_templates} + * Required - The component templates that were updated to not contain custom + * routing settings for the provided data attribute. + *

+ * API name: {@code migrated_component_templates} *

* Adds one or more values to migratedComponentTemplates. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java index 8dafaf75c..61fb418b2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java @@ -106,7 +106,9 @@ public static MoveToStepRequest of(Function + * API name: {@code current_step} */ public final StepKey currentStep() { return this.currentStep; @@ -122,7 +124,9 @@ public final String index() { } /** - * Required - API name: {@code next_step} + * Required - The step that you want to run. + *

+ * API name: {@code next_step} */ public final StepKey nextStep() { return this.nextStep; @@ -163,7 +167,9 @@ public static class Builder extends RequestBase.AbstractBuilder private StepKey nextStep; /** - * Required - API name: {@code current_step} + * Required - The step that the index is expected to be in. + *

+ * API name: {@code current_step} */ public final Builder currentStep(StepKey value) { this.currentStep = value; @@ -171,7 +177,9 @@ public final Builder currentStep(StepKey value) { } /** - * Required - API name: {@code current_step} + * Required - The step that the index is expected to be in. + *

+ * API name: {@code current_step} */ public final Builder currentStep(Function> fn) { return this.currentStep(fn.apply(new StepKey.Builder()).build()); @@ -188,7 +196,9 @@ public final Builder index(String value) { } /** - * Required - API name: {@code next_step} + * Required - The step that you want to run. + *

+ * API name: {@code next_step} */ public final Builder nextStep(StepKey value) { this.nextStep = value; @@ -196,7 +206,9 @@ public final Builder nextStep(StepKey value) { } /** - * Required - API name: {@code next_step} + * Required - The step that you want to run. + *

+ * API name: {@code next_step} */ public final Builder nextStep(Function> fn) { return this.nextStep(fn.apply(new StepKey.Builder()).build()); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java index 78af2f967..65400cf26 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java @@ -85,6 +85,8 @@ public static StartIlmRequest of(Function * API name: {@code master_timeout} */ @Nullable @@ -93,6 +95,8 @@ public final Time masterTimeout() { } /** + * Explicit operation timeout + *

* API name: {@code timeout} */ @Nullable @@ -114,6 +118,8 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private Time timeout; /** + * Explicit operation timeout for connection to master node + *

* API name: {@code master_timeout} */ public final Builder masterTimeout(@Nullable Time value) { @@ -122,6 +128,8 @@ public final Builder masterTimeout(@Nullable Time value) { } /** + * Explicit operation timeout for connection to master node + *

* API name: {@code master_timeout} */ public final Builder masterTimeout(Function> fn) { @@ -129,6 +137,8 @@ public final Builder masterTimeout(Function> f } /** + * Explicit operation timeout + *

* API name: {@code timeout} */ public final Builder timeout(@Nullable Time value) { @@ -137,6 +147,8 @@ public final Builder timeout(@Nullable Time value) { } /** + * Explicit operation timeout + *

* API name: {@code timeout} */ public final Builder timeout(Function> fn) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java index 7f3ebe90d..8c1b051a5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java @@ -90,6 +90,8 @@ public static StopIlmRequest of(Function> } /** + * Explicit operation timeout for connection to master node + *

* API name: {@code master_timeout} */ @Nullable @@ -98,6 +100,8 @@ public final Time masterTimeout() { } /** + * Explicit operation timeout + *

* API name: {@code timeout} */ @Nullable @@ -119,6 +123,8 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private Time timeout; /** + * Explicit operation timeout for connection to master node + *

* API name: {@code master_timeout} */ public final Builder masterTimeout(@Nullable Time value) { @@ -127,6 +133,8 @@ public final Builder masterTimeout(@Nullable Time value) { } /** + * Explicit operation timeout for connection to master node + *

* API name: {@code master_timeout} */ public final Builder masterTimeout(Function> fn) { @@ -134,6 +142,8 @@ public final Builder masterTimeout(Function> f } /** + * Explicit operation timeout + *

* API name: {@code timeout} */ public final Builder timeout(@Nullable Time value) { @@ -142,6 +152,8 @@ public final Builder timeout(@Nullable Time value) { } /** + * Explicit operation timeout + *

* API name: {@code timeout} */ public final Builder timeout(Function> fn) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/move_to_step/StepKey.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/move_to_step/StepKey.java index f773f0fc3..d019c120f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/move_to_step/StepKey.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/move_to_step/StepKey.java @@ -82,6 +82,8 @@ public static StepKey of(Function> fn) { } /** + * The optional action to which the index will be moved. + *

* API name: {@code action} */ @Nullable @@ -90,6 +92,8 @@ public final String action() { } /** + * The optional step name to which the index will be moved. + *

* API name: {@code name} */ @Nullable @@ -151,6 +155,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private String phase; /** + * The optional action to which the index will be moved. + *

* API name: {@code action} */ public final Builder action(@Nullable String value) { @@ -159,6 +165,8 @@ public final Builder action(@Nullable String value) { } /** + * The optional step name to which the index will be moved. + *

* API name: {@code name} */ public final Builder name(@Nullable String value) { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AnalyzeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AnalyzeRequest.java index f25407a11..a9e251a9c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AnalyzeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/AnalyzeRequest.java @@ -63,9 +63,14 @@ // typedef: indices.analyze.Request /** - * Get tokens from text analysis. The analyze API performs analysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

+ * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java index 589c3e312..ec70832cd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java @@ -61,6 +61,11 @@ /** * Clear the cache. Clear the cache of one or more indices. For data streams, * the API clears the caches of the stream's backing indices. + *

+ * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java index 9e25c3ccb..b6a4a2a54 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java @@ -89,13 +89,45 @@ * IMPORTANT: Indices can only be cloned if they meet the following * requirements: *

    + *
  • The index must be marked as read-only and have a cluster health status of + * green.
  • *
  • The target index must not exist.
  • *
  • The source index must have the same number of primary shards as the * target index.
  • *
  • The node handling the clone process must have sufficient free disk space * to accommodate a second copy of the existing index.
  • *
- * + *

+ * The current write index on a data stream cannot be cloned. In order to clone + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be cloned. + *

+ * NOTE: Mappings cannot be specified in the _clone request. The + * mappings of the source index will be used for the target index. + *

+ * Monitor the cloning process + *

+ * The cloning process can be monitored with the cat recovery API or the cluster + * health API can be used to wait until all primary shards have been allocated + * by setting the wait_for_status parameter to yellow. + *

+ * The _clone API returns as soon as the target index has been + * added to the cluster state, before any shards have been allocated. At this + * point, all shards are in the state unassigned. If, for any reason, the target + * index can't be allocated, its primary shard will remain unassigned until it + * can be allocated on that node. + *

+ * Once the primary shard is allocated, it moves to state initializing, and the + * clone process begins. When the clone operation completes, the shard will + * become active. At that point, Elasticsearch will try to allocate any replicas + * and may decide to relocate the primary shard to another node. + *

+ * Wait for active shards + *

+ * Because the clone operation creates a new index to clone the shards to, the + * wait for active shards setting on index creation applies to the clone index + * action as well. + * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateIndexRequest.java index 1a9e5011e..26fddd3fe 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CreateIndexRequest.java @@ -60,7 +60,38 @@ // typedef: indices.create.Request /** - * Create an index. Creates a new index. + * Create an index. You can use the create index API to add a new index to an + * Elasticsearch cluster. When creating an index, you can specify the following: + *

    + *
  • Settings for the index.
  • + *
  • Mappings for fields in the index.
  • + *
  • Index aliases
  • + *
+ *

+ * Wait for active shards + *

+ * By default, index creation will only return a response to the client when the + * primary copies of each shard have been started, or the request times out. The + * index creation response will indicate what happened. For example, + * acknowledged indicates whether the index was successfully + * created in the cluster, while shards_acknowledged indicates + * whether the requisite number of shard copies were started for each shard in + * the index before timing out. Note that it is still possible for either + * acknowledged or shards_acknowledged to be + * false, but for the index creation to be successful. These values + * simply indicate whether the operation completed before the timeout. If + * acknowledged is false, the request timed out before the cluster + * state was updated with the newly created index, but it probably will be + * created sometime soon. If shards_acknowledged is false, then the + * request timed out before the requisite number of shards were started (by + * default just the primaries), even if the cluster state was successfully + * updated to reflect the newly created index (that is to say, + * acknowledged is true). + *

+ * You can change the default of only waiting for the primary shards to start + * through the index setting index.write.wait_for_active_shards. + * Note that changing this setting will also affect the + * wait_for_active_shards value on all subsequent write operations. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java index 65b550b03..e32305f49 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteIndexRequest.java @@ -60,7 +60,13 @@ // typedef: indices.delete.Request /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and + * metadata. It does not delete related Kibana components, such as data views, + * visualizations, or dashboards. + *

+ * You cannot delete the current write index of a data stream. To delete the + * index, you must roll over the data stream so a new write index is created. + * You can then use the delete index API to delete the previous write index. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteTemplateRequest.java index 0a796dac6..1e8ec4664 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DeleteTemplateRequest.java @@ -56,7 +56,7 @@ // typedef: indices.delete_template.Request /** - * Deletes a legacy index template. + * Delete a legacy index template. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java index 3c026fcfa..8d702f847 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java @@ -63,6 +63,14 @@ * index or data stream. This API might not support indices created in previous * Elasticsearch versions. The result of a small index can be inaccurate as some * parts of an index might not be analyzed by the API. + *

+ * NOTE: The total size of fields of the analyzed shards of the index in the + * response is usually smaller than the index store_size value + * because some small metadata files are ignored and some parts of data files + * might not be scanned by the API. Since stored fields are stored together in a + * compressed format, the sizes of stored fields are also estimates and can be + * inaccurate. The stored size of the _id field is likely + * underestimated while the _source field is overestimated. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java index 0adc45bd5..b4081892e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java @@ -106,9 +106,14 @@ public final CompletableFuture addBlock( // ----- Endpoint: indices.analyze /** - * Get tokens from text analysis. The analyze API performs analysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

+ * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @see Documentation @@ -123,9 +128,14 @@ public CompletableFuture analyze(AnalyzeRequest request) { } /** - * Get tokens from text analysis. The analyze API performs analysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

+ * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @param fn * a function that initializes a builder to create the @@ -141,9 +151,14 @@ public final CompletableFuture analyze( } /** - * Get tokens from text analysis. The analyze API performs analysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

+ * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @see Documentation @@ -160,6 +175,11 @@ public CompletableFuture analyze() { /** * Clear the cache. Clear the cache of one or more indices. For data streams, * the API clears the caches of the stream's backing indices. + *

+ * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @see Documentation @@ -176,6 +196,11 @@ public CompletableFuture clearCache(ClearCacheRequest reques /** * Clear the cache. Clear the cache of one or more indices. For data streams, * the API clears the caches of the stream's backing indices. + *

+ * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @param fn * a function that initializes a builder to create the @@ -193,6 +218,11 @@ public final CompletableFuture clearCache( /** * Clear the cache. Clear the cache of one or more indices. For data streams, * the API clears the caches of the stream's backing indices. + *

+ * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @see Documentation @@ -236,13 +266,45 @@ public CompletableFuture clearCache() { * IMPORTANT: Indices can only be cloned if they meet the following * requirements: *

- * + *

+ * The current write index on a data stream cannot be cloned. In order to clone + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be cloned. + *

+ * NOTE: Mappings cannot be specified in the _clone request. The + * mappings of the source index will be used for the target index. + *

+ * Monitor the cloning process + *

+ * The cloning process can be monitored with the cat recovery API or the cluster + * health API can be used to wait until all primary shards have been allocated + * by setting the wait_for_status parameter to yellow. + *

+ * The _clone API returns as soon as the target index has been + * added to the cluster state, before any shards have been allocated. At this + * point, all shards are in the state unassigned. If, for any reason, the target + * index can't be allocated, its primary shard will remain unassigned until it + * can be allocated on that node. + *

+ * Once the primary shard is allocated, it moves to state initializing, and the + * clone process begins. When the clone operation completes, the shard will + * become active. At that point, Elasticsearch will try to allocate any replicas + * and may decide to relocate the primary shard to another node. + *

+ * Wait for active shards + *

+ * Because the clone operation creates a new index to clone the shards to, the + * wait for active shards setting on index creation applies to the clone index + * action as well. + * * @see Documentation * on elastic.co @@ -285,13 +347,45 @@ public CompletableFuture clone(CloneIndexRequest request) { * IMPORTANT: Indices can only be cloned if they meet the following * requirements: *

    + *
  • The index must be marked as read-only and have a cluster health status of + * green.
  • *
  • The target index must not exist.
  • *
  • The source index must have the same number of primary shards as the * target index.
  • *
  • The node handling the clone process must have sufficient free disk space * to accommodate a second copy of the existing index.
  • *
- * + *

+ * The current write index on a data stream cannot be cloned. In order to clone + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be cloned. + *

+ * NOTE: Mappings cannot be specified in the _clone request. The + * mappings of the source index will be used for the target index. + *

+ * Monitor the cloning process + *

+ * The cloning process can be monitored with the cat recovery API or the cluster + * health API can be used to wait until all primary shards have been allocated + * by setting the wait_for_status parameter to yellow. + *

+ * The _clone API returns as soon as the target index has been + * added to the cluster state, before any shards have been allocated. At this + * point, all shards are in the state unassigned. If, for any reason, the target + * index can't be allocated, its primary shard will remain unassigned until it + * can be allocated on that node. + *

+ * Once the primary shard is allocated, it moves to state initializing, and the + * clone process begins. When the clone operation completes, the shard will + * become active. At that point, Elasticsearch will try to allocate any replicas + * and may decide to relocate the primary shard to another node. + *

+ * Wait for active shards + *

+ * Because the clone operation creates a new index to clone the shards to, the + * wait for active shards setting on index creation applies to the clone index + * action as well. + * * @param fn * a function that initializes a builder to create the * {@link CloneIndexRequest} @@ -393,7 +487,38 @@ public final CompletableFuture close( // ----- Endpoint: indices.create /** - * Create an index. Creates a new index. + * Create an index. You can use the create index API to add a new index to an + * Elasticsearch cluster. When creating an index, you can specify the following: + *

    + *
  • Settings for the index.
  • + *
  • Mappings for fields in the index.
  • + *
  • Index aliases
  • + *
+ *

+ * Wait for active shards + *

+ * By default, index creation will only return a response to the client when the + * primary copies of each shard have been started, or the request times out. The + * index creation response will indicate what happened. For example, + * acknowledged indicates whether the index was successfully + * created in the cluster, while shards_acknowledged indicates + * whether the requisite number of shard copies were started for each shard in + * the index before timing out. Note that it is still possible for either + * acknowledged or shards_acknowledged to be + * false, but for the index creation to be successful. These values + * simply indicate whether the operation completed before the timeout. If + * acknowledged is false, the request timed out before the cluster + * state was updated with the newly created index, but it probably will be + * created sometime soon. If shards_acknowledged is false, then the + * request timed out before the requisite number of shards were started (by + * default just the primaries), even if the cluster state was successfully + * updated to reflect the newly created index (that is to say, + * acknowledged is true). + *

+ * You can change the default of only waiting for the primary shards to start + * through the index setting index.write.wait_for_active_shards. + * Note that changing this setting will also affect the + * wait_for_active_shards value on all subsequent write operations. * * @see Documentation @@ -408,7 +533,38 @@ public CompletableFuture create(CreateIndexRequest request) } /** - * Create an index. Creates a new index. + * Create an index. You can use the create index API to add a new index to an + * Elasticsearch cluster. When creating an index, you can specify the following: + *

+ *

+ * Wait for active shards + *

+ * By default, index creation will only return a response to the client when the + * primary copies of each shard have been started, or the request times out. The + * index creation response will indicate what happened. For example, + * acknowledged indicates whether the index was successfully + * created in the cluster, while shards_acknowledged indicates + * whether the requisite number of shard copies were started for each shard in + * the index before timing out. Note that it is still possible for either + * acknowledged or shards_acknowledged to be + * false, but for the index creation to be successful. These values + * simply indicate whether the operation completed before the timeout. If + * acknowledged is false, the request timed out before the cluster + * state was updated with the newly created index, but it probably will be + * created sometime soon. If shards_acknowledged is false, then the + * request timed out before the requisite number of shards were started (by + * default just the primaries), even if the cluster state was successfully + * updated to reflect the newly created index (that is to say, + * acknowledged is true). + *

+ * You can change the default of only waiting for the primary shards to start + * through the index setting index.write.wait_for_active_shards. + * Note that changing this setting will also affect the + * wait_for_active_shards value on all subsequent write operations. * * @param fn * a function that initializes a builder to create the @@ -507,10 +663,16 @@ public CompletableFuture dataStreamsStats() { // ----- Endpoint: indices.delete /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and + * metadata. It does not delete related Kibana components, such as data views, + * visualizations, or dashboards. + *

+ * You cannot delete the current write index of a data stream. To delete the + * index, you must roll over the data stream so a new write index is created. + * You can then use the delete index API to delete the previous write index. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html">Documentation * on elastic.co */ @@ -522,13 +684,19 @@ public CompletableFuture delete(DeleteIndexRequest request) } /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and + * metadata. It does not delete related Kibana components, such as data views, + * visualizations, or dashboards. + *

+ * You cannot delete the current write index of a data stream. To delete the + * index, you must roll over the data stream so a new write index is created. + * You can then use the delete index API to delete the previous write index. * * @param fn * a function that initializes a builder to create the * {@link DeleteIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html">Documentation * on elastic.co */ @@ -543,7 +711,7 @@ public final CompletableFuture delete( * Delete an alias. Removes a data stream or index from an alias. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html">Documentation * on elastic.co */ @@ -561,7 +729,7 @@ public CompletableFuture deleteAlias(DeleteAliasRequest req * a function that initializes a builder to create the * {@link DeleteAliasRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html">Documentation * on elastic.co */ @@ -649,7 +817,7 @@ public final CompletableFuture deleteDataStream( * match completely with existing templates. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template.html">Documentation * on elastic.co */ @@ -670,7 +838,7 @@ public CompletableFuture deleteIndexTemplate(Delete * a function that initializes a builder to create the * {@link DeleteIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template.html">Documentation * on elastic.co */ @@ -682,10 +850,10 @@ public final CompletableFuture deleteIndexTemplate( // ----- Endpoint: indices.delete_template /** - * Deletes a legacy index template. + * Delete a legacy index template. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html">Documentation * on elastic.co */ @@ -697,13 +865,13 @@ public CompletableFuture deleteTemplate(DeleteTemplateRe } /** - * Deletes a legacy index template. + * Delete a legacy index template. * * @param fn * a function that initializes a builder to create the * {@link DeleteTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html">Documentation * on elastic.co */ @@ -719,6 +887,14 @@ public final CompletableFuture deleteTemplate( * index or data stream. This API might not support indices created in previous * Elasticsearch versions. The result of a small index can be inaccurate as some * parts of an index might not be analyzed by the API. + *

+ * NOTE: The total size of fields of the analyzed shards of the index in the + * response is usually smaller than the index store_size value + * because some small metadata files are ignored and some parts of data files + * might not be scanned by the API. Since stored fields are stored together in a + * compressed format, the sizes of stored fields are also estimates and can be + * inaccurate. The stored size of the _id field is likely + * underestimated while the _source field is overestimated. * * @see Documentation @@ -737,6 +913,14 @@ public CompletableFuture diskUsage(DiskUsageRequest request) * index or data stream. This API might not support indices created in previous * Elasticsearch versions. The result of a small index can be inaccurate as some * parts of an index might not be analyzed by the API. + *

+ * NOTE: The total size of fields of the analyzed shards of the index in the + * response is usually smaller than the index store_size value + * because some small metadata files are ignored and some parts of data files + * might not be scanned by the API. Since stored fields are stored together in a + * compressed format, the sizes of stored fields are also estimates and can be + * inaccurate. The stored size of the _id field is likely + * underestimated while the _source field is overestimated. * * @param fn * a function that initializes a builder to create the @@ -807,11 +991,11 @@ public final CompletableFuture downsample( // ----- Endpoint: indices.exists /** - * Check indices. Checks if one or more indices, index aliases, or data streams + * Check indices. Check if one or more indices, index aliases, or data streams * exist. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html">Documentation * on elastic.co */ @@ -823,14 +1007,14 @@ public CompletableFuture exists(ExistsRequest request) { } /** - * Check indices. Checks if one or more indices, index aliases, or data streams + * Check indices. Check if one or more indices, index aliases, or data streams * exist. * * @param fn * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html">Documentation * on elastic.co */ @@ -908,11 +1092,16 @@ public final CompletableFuture existsIndexTemplate( // ----- Endpoint: indices.exists_template /** - * Check existence of index templates. Returns information about whether a - * particular index template exists. + * Check existence of index templates. Get information about whether index + * templates exist. Index templates define settings, mappings, and aliases that + * can be applied automatically to new indices. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html">Documentation * on elastic.co */ @@ -924,14 +1113,19 @@ public CompletableFuture existsTemplate(ExistsTemplateRequest r } /** - * Check existence of index templates. Returns information about whether a - * particular index template exists. + * Check existence of index templates. Get information about whether index + * templates exist. Index templates define settings, mappings, and aliases that + * can be applied automatically to new indices. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @param fn * a function that initializes a builder to create the * {@link ExistsTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html">Documentation * on elastic.co */ @@ -987,9 +1181,14 @@ public final CompletableFuture explainDataLifecycl * are running on a cluster. A shard-level search request that accesses a given * field, even if multiple times during that request, is counted as a single * use. + *

+ * The response body reports the per-shard usage count of the data structures + * that back the fields in the index. A given request will increment each count + * by a maximum value of 1, even if the request accesses the same field multiple + * times. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html">Documentation * on elastic.co */ @@ -1006,12 +1205,17 @@ public CompletableFuture fieldUsageStats(FieldUsageStat * are running on a cluster. A shard-level search request that accesses a given * field, even if multiple times during that request, is counted as a single * use. + *

+ * The response body reports the per-shard usage count of the data structures + * that back the fields in the index. A given request will increment each count + * by a maximum value of 1, even if the request accesses the same field multiple + * times. * * @param fn * a function that initializes a builder to create the * {@link FieldUsageStatsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html">Documentation * on elastic.co */ @@ -1145,9 +1349,63 @@ public CompletableFuture flush() { * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. - * + *

+ * Blocks during a force merge + *

+ * Calls to this API block until the merge is complete (unless request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

+ * Running force merge asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you can not cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

+ * Force merging multiple indices + *

+ * You can force merge multiple indices with a single request by targeting: + *

    + *
  • One or more data streams that contain multiple backing indices
  • + *
  • Multiple indices
  • + *
  • One or more aliases
  • + *
  • All data streams and indices in a cluster
  • + *
+ *

+ * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

+ * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

+ * Data streams and time-based indices + *

+ * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: + * + *

+	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html">Documentation * on elastic.co */ @@ -1178,12 +1436,66 @@ public CompletableFuture forcemerge(ForcemergeRequest reques * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. - * + *

+ * Blocks during a force merge + *

+ * Calls to this API block until the merge is complete (unless request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

+ * Running force merge asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you can not cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

+ * Force merging multiple indices + *

+ * You can force merge multiple indices with a single request by targeting: + *

    + *
  • One or more data streams that contain multiple backing indices
  • + *
  • Multiple indices
  • + *
  • One or more aliases
  • + *
  • All data streams and indices in a cluster
  • + *
+ *

+ * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

+ * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

+ * Data streams and time-based indices + *

+ * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: + * + *

+	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link ForcemergeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html">Documentation * on elastic.co */ @@ -1212,9 +1524,63 @@ public final CompletableFuture forcemerge( * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. - * + *

+ * Blocks during a force merge + *

+ * Calls to this API block until the merge is complete (unless request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

+ * Running force merge asynchronously + *

+ * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you can not cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

+ * Force merging multiple indices + *

+ * You can force merge multiple indices with a single request by targeting: + *

    + *
  • One or more data streams that contain multiple backing indices
  • + *
  • Multiple indices
  • + *
  • One or more aliases
  • + *
  • All data streams and indices in a cluster
  • + *
+ *

+ * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

+ * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

+ * Data streams and time-based indices + *

+ * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: + * + *

+	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+	 * 
+	 * 
+ * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html">Documentation * on elastic.co */ @@ -1226,11 +1592,11 @@ public CompletableFuture forcemerge() { // ----- Endpoint: indices.get /** - * Get index information. Returns information about one or more indices. For - * data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data + * streams, the API returns information about the stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html">Documentation * on elastic.co */ @@ -1242,14 +1608,14 @@ public CompletableFuture get(GetIndexRequest request) { } /** - * Get index information. Returns information about one or more indices. For - * data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data + * streams, the API returns information about the stream’s backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html">Documentation * on elastic.co */ @@ -1264,9 +1630,7 @@ public final CompletableFuture get( * Get aliases. Retrieves information for one or more data stream or index * aliases. 
* - * @see Documentation - * on elastic.co + * @see Documentation on elastic.co */ public CompletableFuture getAlias(GetAliasRequest request) { @@ -1283,9 +1647,7 @@ public CompletableFuture getAlias(GetAliasRequest request) { * @param fn * a function that initializes a builder to create the * {@link GetAliasRequest} - * @see Documentation - * on elastic.co + * @see Documentation on elastic.co */ public final CompletableFuture getAlias( @@ -1297,9 +1659,7 @@ public final CompletableFuture getAlias( * Get aliases. Retrieves information for one or more data stream or index * aliases. * - * @see Documentation - * on elastic.co + * @see Documentation on elastic.co */ public CompletableFuture getAlias() { @@ -1342,6 +1702,21 @@ public final CompletableFuture getDataLifecycle( return getDataLifecycle(fn.apply(new GetDataLifecycleRequest.Builder()).build()); } + // ----- Endpoint: indices.get_data_lifecycle_stats + + /** + * Get data stream lifecycle stats. Get statistics about the data streams that + * are managed by a data stream lifecycle. + * + * @see Documentation + * on elastic.co + */ + public CompletableFuture getDataLifecycleStats() { + return this.transport.performRequestAsync(GetDataLifecycleStatsRequest._INSTANCE, + GetDataLifecycleStatsRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: indices.get_data_stream /** @@ -1394,9 +1769,12 @@ public CompletableFuture getDataStream() { * Get mapping definitions. Retrieves mapping definitions for one or more * fields. For data streams, the API retrieves field mappings for the stream’s * backing indices. + *

+ * This API is useful if you don't need a complete mapping or if an index + * mapping contains a large number of fields. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html">Documentation * on elastic.co */ @@ -1411,12 +1789,15 @@ public CompletableFuture getFieldMapping(GetFieldMappin * Get mapping definitions. Retrieves mapping definitions for one or more * fields. For data streams, the API retrieves field mappings for the stream’s * backing indices. + *

+ * This API is useful if you don't need a complete mapping or if an index + * mapping contains a large number of fields. * * @param fn * a function that initializes a builder to create the * {@link GetFieldMappingRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html">Documentation * on elastic.co */ @@ -1428,10 +1809,10 @@ public final CompletableFuture getFieldMapping( // ----- Endpoint: indices.get_index_template /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html">Documentation * on elastic.co */ @@ -1443,13 +1824,13 @@ public CompletableFuture getIndexTemplate(GetIndexTemp } /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * * @param fn * a function that initializes a builder to create the * {@link GetIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html">Documentation * on elastic.co */ @@ -1459,10 +1840,10 @@ public final CompletableFuture getIndexTemplate( } /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html">Documentation * on elastic.co */ @@ -1474,12 +1855,11 @@ public CompletableFuture getIndexTemplate() { // ----- Endpoint: indices.get_mapping /** - * Get mapping definitions. Retrieves mapping definitions for one or more - * indices. For data streams, the API retrieves mappings for the stream’s - * backing indices. + * Get mapping definitions. 
For data streams, the API retrieves mappings for the + * stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html">Documentation * on elastic.co */ @@ -1491,15 +1871,14 @@ public CompletableFuture getMapping(GetMappingRequest reques } /** - * Get mapping definitions. Retrieves mapping definitions for one or more - * indices. For data streams, the API retrieves mappings for the stream’s - * backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the + * stream’s backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetMappingRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html">Documentation * on elastic.co */ @@ -1509,12 +1888,11 @@ public final CompletableFuture getMapping( } /** - * Get mapping definitions. Retrieves mapping definitions for one or more - * indices. For data streams, the API retrieves mappings for the stream’s - * backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the + * stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html">Documentation * on elastic.co */ @@ -1526,11 +1904,11 @@ public CompletableFuture getMapping() { // ----- Endpoint: indices.get_settings /** - * Get index settings. Returns setting information for one or more indices. For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html">Documentation * on elastic.co */ @@ -1542,14 +1920,14 @@ public CompletableFuture getSettings(GetIndicesSetti } /** - * Get index settings. Returns setting information for one or more indices. For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html">Documentation * on elastic.co */ @@ -1559,11 +1937,11 @@ public final CompletableFuture getSettings( } /** - * Get index settings. Returns setting information for one or more indices. For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html">Documentation * on elastic.co */ @@ -1575,10 +1953,14 @@ public CompletableFuture getSettings() { // ----- Endpoint: indices.get_template /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html">Documentation * on elastic.co */ @@ -1590,13 +1972,17 @@ public CompletableFuture getTemplate(GetTemplateRequest req } /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @param fn * a function that initializes a builder to create the * {@link GetTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html">Documentation * on elastic.co */ @@ -1606,10 +1992,14 @@ public final CompletableFuture getTemplate( } /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html">Documentation * on elastic.co */ @@ -1705,11 +2095,42 @@ public final CompletableFuture modifyDataStream( // ----- Endpoint: indices.open /** - * Opens a closed index. For data streams, the API opens any closed backing + * Open a closed index. For data streams, the API opens any closed backing * indices. + *

+ * A closed index is blocked for read/write operations and does not allow all + * operations that opened indices allow. It is not possible to index documents + * or to search for documents in a closed index. This allows closed indices to + * not have to maintain internal data structures for indexing or searching + * documents, resulting in a smaller overhead on the cluster. + *

+ * When opening or closing an index, the master is responsible for restarting + * the index shards to reflect the new state of the index. The shards will then + * go through the normal recovery process. The data of opened or closed indices + * is automatically replicated by the cluster to ensure that enough shard copies + * are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off by + * using the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change the + * action.destructive_requires_name setting to false. + * This setting can also be changed with the cluster update settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. + *

+ * Because opening or closing an index allocates its shards, the + * wait_for_active_shards setting on index creation applies to the + * _open and _close index actions as well. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html">Documentation * on elastic.co */ @@ -1721,14 +2142,45 @@ public CompletableFuture open(OpenRequest request) { } /** - * Opens a closed index. For data streams, the API opens any closed backing + * Open a closed index. For data streams, the API opens any closed backing * indices. + *

+ * A closed index is blocked for read/write operations and does not allow all + * operations that opened indices allow. It is not possible to index documents + * or to search for documents in a closed index. This allows closed indices to + * not have to maintain internal data structures for indexing or searching + * documents, resulting in a smaller overhead on the cluster. + *

+ * When opening or closing an index, the master is responsible for restarting + * the index shards to reflect the new state of the index. The shards will then + * go through the normal recovery process. The data of opened or closed indices + * is automatically replicated by the cluster to ensure that enough shard copies + * are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off by + * using the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change the + * action.destructive_requires_name setting to false. + * This setting can also be changed with the cluster update settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. + *

+ * Because opening or closing an index allocates its shards, the + * wait_for_active_shards setting on index creation applies to the + * _open and _close index actions as well. * * @param fn * a function that initializes a builder to create the * {@link OpenRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html">Documentation * on elastic.co */ @@ -1872,6 +2324,45 @@ public final CompletableFuture putDataLifecycle( /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + *

+ * Elasticsearch applies templates to new indices based on a wildcard pattern + * that matches the index name. Index templates are applied during data stream + * or index creation. For data streams, these settings and mappings are applied + * when the stream's backing indices are created. Settings and mappings + * specified in a create index API request override any settings or mappings + * specified in an index template. Changes to index templates do not affect + * existing indices, including the existing backing indices of a data stream. + *

+ * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

+ * Multiple matching templates + *

+ * If multiple index templates match the name of a new index or data stream, the + * template with the highest priority is used. + *

+ * Multiple templates with overlapping index patterns at the same priority are + * not allowed and an error will be thrown when attempting to create a template + * matching an existing index template at identical priorities. + *

+ * Composing aliases, mappings, and settings + *

+ * When multiple component templates are specified in the + * composed_of field for an index template, they are merged in the + * order specified, meaning that later component templates override earlier + * component templates. Any mappings, settings, or aliases from the parent index + * template are merged in next. Finally, any configuration on the index request + * itself is merged. Mapping definitions are merged recursively, which means + * that later mapping components can introduce new field mappings and update the + * mapping configuration. If a field mapping is already contained in an earlier + * component, its definition will be completely overwritten by the later one. + * This recursive merging strategy applies not only to field mappings, but also + * root options like dynamic_templates and meta. If an + * earlier component contains a dynamic_templates block, then by + * default new dynamic_templates entries are appended onto the end. + * If an entry already exists with the same key, then it is overwritten by the + * new definition. * * @see Documentation @@ -1888,6 +2379,45 @@ public CompletableFuture putIndexTemplate(PutIndexTemp /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + *

+ * Elasticsearch applies templates to new indices based on a wildcard pattern + * that matches the index name. Index templates are applied during data stream + * or index creation. For data streams, these settings and mappings are applied + * when the stream's backing indices are created. Settings and mappings + * specified in a create index API request override any settings or mappings + * specified in an index template. Changes to index templates do not affect + * existing indices, including the existing backing indices of a data stream. + *

+ * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

+ * Multiple matching templates + *

+ * If multiple index templates match the name of a new index or data stream, the + * template with the highest priority is used. + *

+ * Multiple templates with overlapping index patterns at the same priority are + * not allowed and an error will be thrown when attempting to create a template + * matching an existing index template at identical priorities. + *

+ * Composing aliases, mappings, and settings + *

+ * When multiple component templates are specified in the + * composed_of field for an index template, they are merged in the + * order specified, meaning that later component templates override earlier + * component templates. Any mappings, settings, or aliases from the parent index + * template are merged in next. Finally, any configuration on the index request + * itself is merged. Mapping definitions are merged recursively, which means + * that later mapping components can introduce new field mappings and update the + * mapping configuration. If a field mapping is already contained in an earlier + * component, its definition will be completely overwritten by the later one. + * This recursive merging strategy applies not only to field mappings, but also + * root options like dynamic_templates and meta. If an + * earlier component contains a dynamic_templates block, then by + * default new dynamic_templates entries are appended onto the end. + * If an entry already exists with the same key, then it is overwritten by the + * new definition. * * @param fn * a function that initializes a builder to create the @@ -1905,13 +2435,44 @@ public final CompletableFuture putIndexTemplate( // ----- Endpoint: indices.put_mapping /** - * Update field mappings. Adds new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields. - * For data streams, these changes are applied to all backing indices by - * default. + * Update field mappings. Add new fields to an existing data stream or index. + * You can also use this API to change the search settings of existing fields + * and add new properties to existing object fields. For data streams, these + * changes are applied to all backing indices by default. + *

+ * Add multi-fields to an existing field + *

+ * Multi-fields let you index the same field in different ways. You can use this + * API to update the fields mapping parameter and enable multi-fields for an + * existing field. WARNING: If an index (or data stream) contains documents when + * you add a multi-field, those documents will not have values for the new + * multi-field. You can populate the new multi-field with the update by query + * API. + *

+ * Change supported mapping parameters for an existing field + *

+ * The documentation for each mapping parameter indicates whether you can update + * it for an existing field using this API. For example, you can use the update + * mapping API to update the ignore_above parameter. + *

+ * Change the mapping of an existing field + *

+ * Except for supported mapping parameters, you can't change the mapping or + * field type of an existing field. Changing an existing field could invalidate + * data that's already indexed. + *

+ * If you need to change the mapping of a field in a data stream's backing + * indices, refer to documentation about modifying data streams. If you need to + * change the mapping of a field in other indices, create a new index with the + * correct mapping and reindex your data into that index. + *

+ * Rename a field + *

+ * Renaming a field would invalidate data already indexed under the old field + * name. Instead, add an alias field to create an alternate field name. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html">Documentation * on elastic.co */ @@ -1923,16 +2484,47 @@ public CompletableFuture putMapping(PutMappingRequest reques } /** - * Update field mappings. Adds new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields. - * For data streams, these changes are applied to all backing indices by - * default. + * Update field mappings. Add new fields to an existing data stream or index. + * You can also use this API to change the search settings of existing fields + * and add new properties to existing object fields. For data streams, these + * changes are applied to all backing indices by default. + *

+ * Add multi-fields to an existing field + *

+ * Multi-fields let you index the same field in different ways. You can use this + * API to update the fields mapping parameter and enable multi-fields for an + * existing field. WARNING: If an index (or data stream) contains documents when + * you add a multi-field, those documents will not have values for the new + * multi-field. You can populate the new multi-field with the update by query + * API. + *

+ * Change supported mapping parameters for an existing field + *

+ * The documentation for each mapping parameter indicates whether you can update + * it for an existing field using this API. For example, you can use the update + * mapping API to update the ignore_above parameter. + *

+ * Change the mapping of an existing field + *

+ * Except for supported mapping parameters, you can't change the mapping or + * field type of an existing field. Changing an existing field could invalidate + * data that's already indexed. + *

+ * If you need to change the mapping of a field in a data stream's backing + * indices, refer to documentation about modifying data streams. If you need to + * change the mapping of a field in other indices, create a new index with the + * correct mapping and reindex your data into that index. + *

+ * Rename a field + *

+ * Renaming a field would invalidate data already indexed under the old field + * name. Instead, add an alias field to create an alternate field name. * * @param fn * a function that initializes a builder to create the * {@link PutMappingRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html">Documentation * on elastic.co */ @@ -1946,9 +2538,26 @@ public final CompletableFuture putMapping( /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. + *

+ * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

+ * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html">Documentation * on elastic.co */ @@ -1962,12 +2571,29 @@ public CompletableFuture putSettings(PutIndicesSetti /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. + *

+ * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

+ * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @param fn * a function that initializes a builder to create the * {@link PutIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html">Documentation * on elastic.co */ @@ -1979,9 +2605,26 @@ public final CompletableFuture putSettings( /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. + *

+ * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

+ * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html">Documentation * on elastic.co */ @@ -2010,9 +2653,22 @@ public CompletableFuture putSettings() { * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. + *

+ * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

+ * Indices matching multiple templates + *

+ * Multiple index templates can potentially match an index, in this case, both + * the settings and mappings are merged into the final configuration of the + * index. The order of the merging can be controlled using the order parameter, + * with lower order being applied first, and higher orders overriding them. + * NOTE: Multiple matching templates with the same order value will result in a + * non-deterministic merging order. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html">Documentation * on elastic.co */ @@ -2041,12 +2697,25 @@ public CompletableFuture putTemplate(PutTemplateRequest req * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. + *

+ * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

+ * Indices matching multiple templates + *

+ * Multiple index templates can potentially match an index, in this case, both + * the settings and mappings are merged into the final configuration of the + * index. The order of the merging can be controlled using the order parameter, + * with lower order being applied first, and higher orders overriding them. + * NOTE: Multiple matching templates with the same order value will result in a + * non-deterministic merging order. * * @param fn * a function that initializes a builder to create the * {@link PutTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html">Documentation * on elastic.co */ @@ -2062,6 +2731,9 @@ public final CompletableFuture putTemplate( * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

+ * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

* Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for @@ -2091,7 +2763,7 @@ public final CompletableFuture putTemplate( * the recovery API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html">Documentation * on elastic.co */ @@ -2107,6 +2779,9 @@ public CompletableFuture recovery(RecoveryRequest request) { * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

+ * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

* Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for @@ -2139,7 +2814,7 @@ public CompletableFuture recovery(RecoveryRequest request) { * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html">Documentation * on elastic.co */ @@ -2153,6 +2828,9 @@ public final CompletableFuture recovery( * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

+ * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

* Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for @@ -2182,7 +2860,7 @@ public final CompletableFuture recovery( * the recovery API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html">Documentation * on elastic.co */ @@ -2197,9 +2875,26 @@ public CompletableFuture recovery() { * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. + *

+ * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

+ * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

+ * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

+ * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html">Documentation * on elastic.co */ @@ -2214,12 +2909,29 @@ public CompletableFuture refresh(RefreshRequest request) { * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. + *

+ * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

+ * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

+ * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

+ * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @param fn * a function that initializes a builder to create the * {@link RefreshRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html">Documentation * on elastic.co */ @@ -2232,9 +2944,26 @@ public final CompletableFuture refresh( * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. + *

+ * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

+ * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

+ * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

+ * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html">Documentation * on elastic.co */ @@ -2270,7 +2999,7 @@ public CompletableFuture refresh() { * in case shards are relocated in the future. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html">Documentation * on elastic.co */ @@ -2310,7 +3039,7 @@ public CompletableFuture reloadSearchAnalyzers( * a function that initializes a builder to create the * {@link ReloadSearchAnalyzersRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html">Documentation * on elastic.co */ @@ -2348,9 +3077,39 @@ public final CompletableFuture reloadSearchAnalyz *

  • Cluster version information, including the Elasticsearch server * version.
  • * + *

    + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

    + * Advantages of using this endpoint before a cross-cluster + * search + *

    + * You may want to exclude a cluster or index from a search when: + *

      + *
    • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
    • + *
    • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
    • + *
    • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
    • + *
    • A remote cluster is an older version that does not support the feature + * you want to use in your search.
    • + *
    * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html">Documentation * on elastic.co */ @@ -2388,12 +3147,42 @@ public CompletableFuture resolveCluster(ResolveClusterRe *
  • Cluster version information, including the Elasticsearch server * version.
  • * + *

    + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

    + * Advantages of using this endpoint before a cross-cluster + * search + *

    + * You may want to exclude a cluster or index from a search when: + *

      + *
    • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
    • + *
    • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
    • + *
    • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
    • + *
    • A remote cluster is an older version that does not support the feature + * you want to use in your search.
    • + *
    * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html">Documentation * on elastic.co */ @@ -2410,7 +3199,7 @@ public final CompletableFuture resolveCluster( * supported. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-index-api.html">Documentation * on elastic.co */ @@ -2430,7 +3219,7 @@ public CompletableFuture resolveIndex(ResolveIndexRequest * a function that initializes a builder to create the * {@link ResolveIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-index-api.html">Documentation * on elastic.co */ @@ -2442,8 +3231,57 @@ public final CompletableFuture resolveIndex( // ----- Endpoint: indices.rollover /** - * Roll over to a new index. Creates a new index for a data stream or index + * Roll over to a new index. TIP: It is recommended to use the index lifecycle + * rollover action to automate rollovers. + *

    + * The rollover API creates a new index for a data stream or index alias. The + * API behavior depends on the rollover target. + *

    + * Roll over a data stream + *

    + * If you roll over a data stream, the API creates a new write index for the + * stream. The stream's previous write index becomes a regular backing index. A + * rollover also increments the data stream's generation. + *

    + * Roll over an index alias with a write index + *

    + * TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a + * write index to manage time series data. Data streams replace this + * functionality, require less maintenance, and automatically integrate with + * data tiers. + *

    + * If an index alias points to multiple indices, one of the indices must be a + * write index. The rollover API creates a new write index for the alias with + * is_write_index set to true. The API also + * sets is_write_index to false for the previous write + * index. + *

    + * Roll over an index alias with one index + *

    + * If you roll over an index alias that points to only one index, the API + * creates a new index for the alias and removes the original index from the * alias. + *

    + * NOTE: A rollover creates a new index and is subject to the + * wait_for_active_shards setting. + *

    + * Increment index names for an alias + *

    + * When you roll over an index alias, you can specify a name for the new index. + * If you don't specify a name and the current index ends with - + * and a number, such as my-index-000001 or + * my-index-3, the new index name increments that number. For + * example, if you roll over an alias with a current index of + * my-index-000001, the rollover creates a new index named + * my-index-000002. This number is always six characters and + * zero-padded, regardless of the previous index's name. + *

    + * If you use an index alias for time series data, you can use date math in the + * index name to track the rollover date. For example, you can create an alias + * that points to an index named <my-index-{now/d}-000001>. + * If you create the index on May 6, 2099, the index's name is + * my-index-2099.05.06-000001. If you roll over the alias on May 7, + * 2099, the new index's name is my-index-2099.05.07-000002. * * @see Documentation @@ -2458,8 +3296,57 @@ public CompletableFuture rollover(RolloverRequest request) { } /** - * Roll over to a new index. Creates a new index for a data stream or index + * Roll over to a new index. TIP: It is recommended to use the index lifecycle + * rollover action to automate rollovers. + *

    + * The rollover API creates a new index for a data stream or index alias. The + * API behavior depends on the rollover target. + *

    + * Roll over a data stream + *

    + * If you roll over a data stream, the API creates a new write index for the + * stream. The stream's previous write index becomes a regular backing index. A + * rollover also increments the data stream's generation. + *

    + * Roll over an index alias with a write index + *

    + * TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a + * write index to manage time series data. Data streams replace this + * functionality, require less maintenance, and automatically integrate with + * data tiers. + *

    + * If an index alias points to multiple indices, one of the indices must be a + * write index. The rollover API creates a new write index for the alias with + * is_write_index set to true. The API also + * sets is_write_index to false for the previous write + * index. + *

    + * Roll over an index alias with one index + *

    + * If you roll over an index alias that points to only one index, the API + * creates a new index for the alias and removes the original index from the * alias. + *

    + * NOTE: A rollover creates a new index and is subject to the + * wait_for_active_shards setting. + *

    + * Increment index names for an alias + *

    + * When you roll over an index alias, you can specify a name for the new index. + * If you don't specify a name and the current index ends with - + * and a number, such as my-index-000001 or + * my-index-3, the new index name increments that number. For + * example, if you roll over an alias with a current index of + * my-index-000001, the rollover creates a new index named + * my-index-000002. This number is always six characters and + * zero-padded, regardless of the previous index's name. + *

    + * If you use an index alias for time series data, you can use date math in the + * index name to track the rollover date. For example, you can create an alias + * that points to an index named <my-index-{now/d}-000001>. + * If you create the index on May 6, 2099, the index's name is + * my-index-2099.05.06-000001. If you roll over the alias on May 7, + * 2099, the new index's name is my-index-2099.05.07-000002. * * @param fn * a function that initializes a builder to create the @@ -2482,7 +3369,7 @@ public final CompletableFuture rollover( * stream's backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html">Documentation * on elastic.co */ @@ -2502,7 +3389,7 @@ public CompletableFuture segments(SegmentsRequest request) { * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html">Documentation * on elastic.co */ @@ -2517,7 +3404,7 @@ public final CompletableFuture segments( * stream's backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html">Documentation * on elastic.co */ @@ -2546,7 +3433,7 @@ public CompletableFuture segments() { * are unassigned or have one or more unassigned replica shards. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html">Documentation * on elastic.co */ @@ -2578,7 +3465,7 @@ public CompletableFuture shardStores(ShardStoresRequest req * a function that initializes a builder to create the * {@link ShardStoresRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html">Documentation * on elastic.co */ @@ -2605,7 +3492,7 @@ public final CompletableFuture shardStores( * are unassigned or have one or more unassigned replica shards. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html">Documentation * on elastic.co */ @@ -2758,11 +3645,11 @@ public final CompletableFuture shrink( // ----- Endpoint: indices.simulate_index_template /** - * Simulate an index. Returns the index configuration that would be applied to - * the specified index from an existing index template. + * Simulate an index. Get the index configuration that would be applied to the + * specified index from an existing index template. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/{master}/indices-simulate-index.html">Documentation * on elastic.co */ @@ -2775,14 +3662,14 @@ public CompletableFuture simulateIndexTemplate( } /** - * Simulate an index. Returns the index configuration that would be applied to - * the specified index from an existing index template. + * Simulate an index. Get the index configuration that would be applied to the + * specified index from an existing index template. * * @param fn * a function that initializes a builder to create the * {@link SimulateIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/{master}/indices-simulate-index.html">Documentation * on elastic.co */ @@ -2794,11 +3681,11 @@ public final CompletableFuture simulateIndexTempl // ----- Endpoint: indices.simulate_template /** - * Simulate an index template. Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied + * by a particular index template. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html">Documentation * on elastic.co */ @@ -2810,14 +3697,14 @@ public CompletableFuture simulateTemplate(SimulateTemp } /** - * Simulate an index template. 
Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied + * by a particular index template. * * @param fn * a function that initializes a builder to create the * {@link SimulateTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html">Documentation * on elastic.co */ @@ -2827,11 +3714,11 @@ public final CompletableFuture simulateTemplate( } /** - * Simulate an index template. Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied + * by a particular index template. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html">Documentation * on elastic.co */ @@ -2862,6 +3749,18 @@ public CompletableFuture simulateTemplate() { * * *

    + * You can make an index read-only with the following request using the add + * index block API: + * + *

    +	 * PUT /my_source_index/_block/write
    +	 * 
    +	 * 
    + *

    + * The current write index on a data stream cannot be split. In order to split + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be split. + *

    * The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing @@ -2927,6 +3826,18 @@ public CompletableFuture split(SplitRequest request) { * * *

    + * You can make an index read-only with the following request using the add + * index block API: + * + *

    +	 * PUT /my_source_index/_block/write
    +	 * 
    +	 * 
    + *

    + * The current write index on a data stream cannot be split. In order to split + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be split. + *

    * The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing @@ -2993,7 +3904,7 @@ public final CompletableFuture split( * any node-level statistics to which the shard contributed. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html">Documentation * on elastic.co */ @@ -3025,7 +3936,7 @@ public CompletableFuture stats(IndicesStatsRequest request * a function that initializes a builder to create the * {@link IndicesStatsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html">Documentation * on elastic.co */ @@ -3052,7 +3963,7 @@ public final CompletableFuture stats( * any node-level statistics to which the shard contributed. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html">Documentation * on elastic.co */ @@ -3068,7 +3979,7 @@ public CompletableFuture stats() { * the normal recovery process and becomes writeable again. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html">Documentation * on elastic.co */ @@ -3087,7 +3998,7 @@ public CompletableFuture unfreeze(UnfreezeRequest request) { * a function that initializes a builder to create the * {@link UnfreezeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java index dbcdd66cc..55890d691 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java @@ -104,9 +104,14 @@ public final AddBlockResponse addBlock(Functionanalysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

    + * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @see Documentation @@ -121,9 +126,14 @@ public AnalyzeResponse analyze(AnalyzeRequest request) throws IOException, Elast } /** - * Get tokens from text analysis. The analyze API performs analysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

    + * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @param fn * a function that initializes a builder to create the @@ -139,9 +149,14 @@ public final AnalyzeResponse analyze(Functionanalysis - * on a text string and returns the resulting tokens. + * Get tokens from text analysis. The analyze API performs analysis on a text + * string and returns the resulting tokens. + *

    + * Generating excessive amount of tokens may cause a node to run out of memory. + * The index.analyze.max_token_count setting enables you to limit + * the number of tokens that can be produced. If more than this limit of tokens + * gets generated, an error occurs. The _analyze endpoint without a + * specified index will always use 10000 as its limit. * * @see Documentation @@ -158,6 +173,11 @@ public AnalyzeResponse analyze() throws IOException, ElasticsearchException { /** * Clear the cache. Clear the cache of one or more indices. For data streams, * the API clears the caches of the stream's backing indices. + *

    + * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @see Documentation @@ -174,6 +194,11 @@ public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOExcepti /** * Clear the cache. Clear the cache of one or more indices. For data streams, * the API clears the caches of the stream's backing indices. + *

    + * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @param fn * a function that initializes a builder to create the @@ -191,6 +216,11 @@ public final ClearCacheResponse clearCache(Function + * By default, the clear cache API clears all caches. To clear only specific + * caches, use the fielddata, query, or + * request parameters. To clear the cache only of specific fields, + * use the fields parameter. * * @see Documentation @@ -234,13 +264,45 @@ public ClearCacheResponse clearCache() throws IOException, ElasticsearchExceptio * IMPORTANT: Indices can only be cloned if they meet the following * requirements: *

    - * + *

    + * The current write index on a data stream cannot be cloned. In order to clone + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be cloned. + *

    + * NOTE: Mappings cannot be specified in the _clone request. The + * mappings of the source index will be used for the target index. + *

    + * Monitor the cloning process + *

    + * The cloning process can be monitored with the cat recovery API or the cluster + * health API can be used to wait until all primary shards have been allocated + * by setting the wait_for_status parameter to yellow. + *

    + * The _clone API returns as soon as the target index has been + * added to the cluster state, before any shards have been allocated. At this + * point, all shards are in the state unassigned. If, for any reason, the target + * index can't be allocated, its primary shard will remain unassigned until it + * can be allocated on that node. + *

    + * Once the primary shard is allocated, it moves to state initializing, and the + * clone process begins. When the clone operation completes, the shard will + * become active. At that point, Elasticsearch will try to allocate any replicas + * and may decide to relocate the primary shard to another node. + *

    + * Wait for active shards + *

    + * Because the clone operation creates a new index to clone the shards to, the + * wait for active shards setting on index creation applies to the clone index + * action as well. + * * @see Documentation * on elastic.co @@ -283,13 +345,45 @@ public CloneIndexResponse clone(CloneIndexRequest request) throws IOException, E * IMPORTANT: Indices can only be cloned if they meet the following * requirements: *

      + *
    • The index must be marked as read-only and have a cluster health status of + * green.
    • *
    • The target index must not exist.
    • *
    • The source index must have the same number of primary shards as the * target index.
    • *
    • The node handling the clone process must have sufficient free disk space * to accommodate a second copy of the existing index.
    • *
    - * + *

    + * The current write index on a data stream cannot be cloned. In order to clone + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be cloned. + *

    + * NOTE: Mappings cannot be specified in the _clone request. The + * mappings of the source index will be used for the target index. + *

    + * Monitor the cloning process + *

    + * The cloning process can be monitored with the cat recovery API or the cluster + * health API can be used to wait until all primary shards have been allocated + * by setting the wait_for_status parameter to yellow. + *

    + * The _clone API returns as soon as the target index has been + * added to the cluster state, before any shards have been allocated. At this + * point, all shards are in the state unassigned. If, for any reason, the target + * index can't be allocated, its primary shard will remain unassigned until it + * can be allocated on that node. + *

    + * Once the primary shard is allocated, it moves to state initializing, and the + * clone process begins. When the clone operation completes, the shard will + * become active. At that point, Elasticsearch will try to allocate any replicas + * and may decide to relocate the primary shard to another node. + *

    + * Wait for active shards + *

    + * Because the clone operation creates a new index to clone the shards to, the + * wait for active shards setting on index creation applies to the clone index + * action as well. + * * @param fn * a function that initializes a builder to create the * {@link CloneIndexRequest} @@ -391,7 +485,38 @@ public final CloseIndexResponse close(Function + *

  • Settings for the index.
  • + *
  • Mappings for fields in the index.
  • + *
  • Index aliases
  • + * + *

    + * Wait for active shards + *

    + * By default, index creation will only return a response to the client when the + * primary copies of each shard have been started, or the request times out. The + * index creation response will indicate what happened. For example, + * acknowledged indicates whether the index was successfully + * created in the cluster, while shards_acknowledged indicates + * whether the requisite number of shard copies were started for each shard in + * the index before timing out. Note that it is still possible for either + * acknowledged or shards_acknowledged to be + * false, but for the index creation to be successful. These values + * simply indicate whether the operation completed before the timeout. If + * acknowledged is false, the request timed out before the cluster + * state was updated with the newly created index, but it probably will be + * created sometime soon. If shards_acknowledged is false, then the + * request timed out before the requisite number of shards were started (by + * default just the primaries), even if the cluster state was successfully + * updated to reflect the newly created index (that is to say, + * acknowledged is true). + *

    + * You can change the default of only waiting for the primary shards to start + * through the index setting index.write.wait_for_active_shards. + * Note that changing this setting will also affect the + * wait_for_active_shards value on all subsequent write operations. * * @see Documentation @@ -406,7 +531,38 @@ public CreateIndexResponse create(CreateIndexRequest request) throws IOException } /** - * Create an index. Creates a new index. + * Create an index. You can use the create index API to add a new index to an + * Elasticsearch cluster. When creating an index, you can specify the following: + *

    + *

    + * Wait for active shards + *

    + * By default, index creation will only return a response to the client when the + * primary copies of each shard have been started, or the request times out. The + * index creation response will indicate what happened. For example, + * acknowledged indicates whether the index was successfully + * created in the cluster, while shards_acknowledged indicates + * whether the requisite number of shard copies were started for each shard in + * the index before timing out. Note that it is still possible for either + * acknowledged or shards_acknowledged to be + * false, but for the index creation to be successful. These values + * simply indicate whether the operation completed before the timeout. If + * acknowledged is false, the request timed out before the cluster + * state was updated with the newly created index, but it probably will be + * created sometime soon. If shards_acknowledged is false, then the + * request timed out before the requisite number of shards were started (by + * default just the primaries), even if the cluster state was successfully + * updated to reflect the newly created index (that is to say, + * acknowledged is true). + *

    + * You can change the default of only waiting for the primary shards to start + * through the index setting index.write.wait_for_active_shards. + * Note that changing this setting will also affect the + * wait_for_active_shards value on all subsequent write operations. * * @param fn * a function that initializes a builder to create the @@ -509,10 +665,16 @@ public DataStreamsStatsResponse dataStreamsStats() throws IOException, Elasticse // ----- Endpoint: indices.delete /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and + * metadata. It does not delete related Kibana components, such as data views, + * visualizations, or dashboards. + *

    + * You cannot delete the current write index of a data stream. To delete the + * index, you must roll over the data stream so a new write index is created. + * You can then use the delete index API to delete the previous write index. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html">Documentation * on elastic.co */ @@ -524,13 +686,19 @@ public DeleteIndexResponse delete(DeleteIndexRequest request) throws IOException } /** - * Delete indices. Deletes one or more indices. + * Delete indices. Deleting an index deletes its documents, shards, and + * metadata. It does not delete related Kibana components, such as data views, + * visualizations, or dashboards. + *

    + * You cannot delete the current write index of a data stream. To delete the + * index, you must roll over the data stream so a new write index is created. + * You can then use the delete index API to delete the previous write index. * * @param fn * a function that initializes a builder to create the * {@link DeleteIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html">Documentation * on elastic.co */ @@ -545,7 +713,7 @@ public final DeleteIndexResponse delete(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html">Documentation * on elastic.co */ @@ -563,7 +731,7 @@ public DeleteAliasResponse deleteAlias(DeleteAliasRequest request) throws IOExce * a function that initializes a builder to create the * {@link DeleteAliasRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html">Documentation * on elastic.co */ @@ -656,7 +824,7 @@ public final DeleteDataStreamResponse deleteDataStream( * match completely with existing templates. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template.html">Documentation * on elastic.co */ @@ -678,7 +846,7 @@ public DeleteIndexTemplateResponse deleteIndexTemplate(DeleteIndexTemplateReques * a function that initializes a builder to create the * {@link DeleteIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template.html">Documentation * on elastic.co */ @@ -691,10 +859,10 @@ public final DeleteIndexTemplateResponse deleteIndexTemplate( // ----- Endpoint: indices.delete_template /** - * Deletes a legacy index template. + * Delete a legacy index template. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html">Documentation * on elastic.co */ @@ -707,13 +875,13 @@ public DeleteTemplateResponse deleteTemplate(DeleteTemplateRequest request) } /** - * Deletes a legacy index template. + * Delete a legacy index template. * * @param fn * a function that initializes a builder to create the * {@link DeleteTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html">Documentation * on elastic.co */ @@ -730,6 +898,14 @@ public final DeleteTemplateResponse deleteTemplate( * index or data stream. This API might not support indices created in previous * Elasticsearch versions. The result of a small index can be inaccurate as some * parts of an index might not be analyzed by the API. + *

    + * NOTE: The total size of fields of the analyzed shards of the index in the + * response is usually smaller than the index store_size value + * because some small metadata files are ignored and some parts of data files + * might not be scanned by the API. Since stored fields are stored together in a + * compressed format, the sizes of stored fields are also estimates and can be + * inaccurate. The stored size of the _id field is likely + * underestimated while the _source field is overestimated. * * @see Documentation @@ -748,6 +924,14 @@ public DiskUsageResponse diskUsage(DiskUsageRequest request) throws IOException, * index or data stream. This API might not support indices created in previous * Elasticsearch versions. The result of a small index can be inaccurate as some * parts of an index might not be analyzed by the API. + *

    + * NOTE: The total size of fields of the analyzed shards of the index in the + * response is usually smaller than the index store_size value + * because some small metadata files are ignored and some parts of data files + * might not be scanned by the API. Since stored fields are stored together in a + * compressed format, the sizes of stored fields are also estimates and can be + * inaccurate. The stored size of the _id field is likely + * underestimated while the _source field is overestimated. * * @param fn * a function that initializes a builder to create the @@ -818,11 +1002,11 @@ public final DownsampleResponse downsample(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html">Documentation * on elastic.co */ @@ -834,14 +1018,14 @@ public BooleanResponse exists(ExistsRequest request) throws IOException, Elastic } /** - * Check indices. Checks if one or more indices, index aliases, or data streams + * Check indices. Check if one or more indices, index aliases, or data streams * exist. * * @param fn * a function that initializes a builder to create the * {@link ExistsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html">Documentation * on elastic.co */ @@ -921,11 +1105,16 @@ public final BooleanResponse existsIndexTemplate( // ----- Endpoint: indices.exists_template /** - * Check existence of index templates. Returns information about whether a - * particular index template exists. + * Check existence of index templates. Get information about whether index + * templates exist. Index templates define settings, mappings, and aliases that + * can be applied automatically to new indices. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html">Documentation * on elastic.co */ @@ -937,14 +1126,19 @@ public BooleanResponse existsTemplate(ExistsTemplateRequest request) throws IOEx } /** - * Check existence of index templates. Returns information about whether a - * particular index template exists. + * Check existence of index templates. Get information about whether index + * templates exist. Index templates define settings, mappings, and aliases that + * can be applied automatically to new indices. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @param fn * a function that initializes a builder to create the * {@link ExistsTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html">Documentation * on elastic.co */ @@ -1003,9 +1197,14 @@ public final ExplainDataLifecycleResponse explainDataLifecycle( * are running on a cluster. A shard-level search request that accesses a given * field, even if multiple times during that request, is counted as a single * use. + *

    + * The response body reports the per-shard usage count of the data structures + * that back the fields in the index. A given request will increment each count + * by a maximum value of 1, even if the request accesses the same field multiple + * times. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html">Documentation * on elastic.co */ @@ -1023,12 +1222,17 @@ public FieldUsageStatsResponse fieldUsageStats(FieldUsageStatsRequest request) * are running on a cluster. A shard-level search request that accesses a given * field, even if multiple times during that request, is counted as a single * use. + *

    + * The response body reports the per-shard usage count of the data structures + * that back the fields in the index. A given request will increment each count + * by a maximum value of 1, even if the request accesses the same field multiple + * times. * * @param fn * a function that initializes a builder to create the * {@link FieldUsageStatsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html">Documentation * on elastic.co */ @@ -1163,9 +1367,63 @@ public FlushResponse flush() throws IOException, ElasticsearchException { * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. - * + *

    + * Blocks during a force merge + *

    + * Calls to this API block until the merge is complete (unless the request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

    + * Running force merge asynchronously + *

    + * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you cannot cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

    + * Force merging multiple indices + *

    + * You can force merge multiple indices with a single request by targeting: + *

      + *
    • One or more data streams that contain multiple backing indices
    • + *
    • Multiple indices
    • + *
    • One or more aliases
    • + *
    • All data streams and indices in a cluster
    • + *
    + *

    + * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

    + * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

    + * Data streams and time-based indices + *

    + * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: + * + *

    +	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
    +	 * 
    +	 * 
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html">Documentation * on elastic.co */ @@ -1196,12 +1454,66 @@ public ForcemergeResponse forcemerge(ForcemergeRequest request) throws IOExcepti * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. - * + *

    + * Blocks during a force merge + *

    + * Calls to this API block until the merge is complete (unless request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

    + * Running force merge asynchronously + *

    + * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you cannot cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

    + * Force merging multiple indices + *

    + * You can force merge multiple indices with a single request by targeting: + *

      + *
    • One or more data streams that contain multiple backing indices
    • + *
    • Multiple indices
    • + *
    • One or more aliases
    • + *
    • All data streams and indices in a cluster
    • + *
    + *

    + * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

    + * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

    + * Data streams and time-based indices + *

    + * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: + * + *

    +	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
    +	 * 
    +	 * 
    + * * @param fn * a function that initializes a builder to create the * {@link ForcemergeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html">Documentation * on elastic.co */ @@ -1230,9 +1542,63 @@ public final ForcemergeResponse forcemerge(Function + * Blocks during a force merge + *

    + * Calls to this API block until the merge is complete (unless request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

    + * Running force merge asynchronously + *

    + * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you cannot cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

    + * Force merging multiple indices + *

    + * You can force merge multiple indices with a single request by targeting: + *

      + *
    • One or more data streams that contain multiple backing indices
    • + *
    • Multiple indices
    • + *
    • One or more aliases
    • + *
    • All data streams and indices in a cluster
    • + *
    + *

    + * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

    + * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

    + * Data streams and time-based indices + *

    + * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: + * + *

    +	 * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
    +	 * 
    +	 * 
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html">Documentation * on elastic.co */ @@ -1244,11 +1610,11 @@ public ForcemergeResponse forcemerge() throws IOException, ElasticsearchExceptio // ----- Endpoint: indices.get /** - * Get index information. Returns information about one or more indices. For - * data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data + * streams, the API returns information about the stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html">Documentation * on elastic.co */ @@ -1260,14 +1626,14 @@ public GetIndexResponse get(GetIndexRequest request) throws IOException, Elastic } /** - * Get index information. Returns information about one or more indices. For - * data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data + * streams, the API returns information about the stream’s backing indices. 
* * @param fn * a function that initializes a builder to create the * {@link GetIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html">Documentation * on elastic.co */ @@ -1282,9 +1648,7 @@ public final GetIndexResponse get(FunctionDocumentation - * on elastic.co + * @see Documentation on elastic.co */ public GetAliasResponse getAlias(GetAliasRequest request) throws IOException, ElasticsearchException { @@ -1301,9 +1665,7 @@ public GetAliasResponse getAlias(GetAliasRequest request) throws IOException, El * @param fn * a function that initializes a builder to create the * {@link GetAliasRequest} - * @see Documentation - * on elastic.co + * @see Documentation on elastic.co */ public final GetAliasResponse getAlias(Function> fn) @@ -1315,9 +1677,7 @@ public final GetAliasResponse getAlias(FunctionDocumentation - * on elastic.co + * @see Documentation on elastic.co */ public GetAliasResponse getAlias() throws IOException, ElasticsearchException { @@ -1362,6 +1722,21 @@ public final GetDataLifecycleResponse getDataLifecycle( return getDataLifecycle(fn.apply(new GetDataLifecycleRequest.Builder()).build()); } + // ----- Endpoint: indices.get_data_lifecycle_stats + + /** + * Get data stream lifecycle stats. Get statistics about the data streams that + * are managed by a data stream lifecycle. + * + * @see Documentation + * on elastic.co + */ + public GetDataLifecycleStatsResponse getDataLifecycleStats() throws IOException, ElasticsearchException { + return this.transport.performRequest(GetDataLifecycleStatsRequest._INSTANCE, + GetDataLifecycleStatsRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: indices.get_data_stream /** @@ -1416,9 +1791,12 @@ public GetDataStreamResponse getDataStream() throws IOException, ElasticsearchEx * Get mapping definitions. Retrieves mapping definitions for one or more * fields. 
For data streams, the API retrieves field mappings for the stream’s * backing indices. + *

    + * This API is useful if you don't need a complete mapping or if an index + * mapping contains a large number of fields. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html">Documentation * on elastic.co */ @@ -1434,12 +1812,15 @@ public GetFieldMappingResponse getFieldMapping(GetFieldMappingRequest request) * Get mapping definitions. Retrieves mapping definitions for one or more * fields. For data streams, the API retrieves field mappings for the stream’s * backing indices. + *

    + * This API is useful if you don't need a complete mapping or if an index + * mapping contains a large number of fields. * * @param fn * a function that initializes a builder to create the * {@link GetFieldMappingRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html">Documentation * on elastic.co */ @@ -1452,10 +1833,10 @@ public final GetFieldMappingResponse getFieldMapping( // ----- Endpoint: indices.get_index_template /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html">Documentation * on elastic.co */ @@ -1468,13 +1849,13 @@ public GetIndexTemplateResponse getIndexTemplate(GetIndexTemplateRequest request } /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * * @param fn * a function that initializes a builder to create the * {@link GetIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html">Documentation * on elastic.co */ @@ -1485,10 +1866,10 @@ public final GetIndexTemplateResponse getIndexTemplate( } /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html">Documentation * on elastic.co */ @@ -1500,12 +1881,11 @@ public GetIndexTemplateResponse getIndexTemplate() throws IOException, Elasticse // ----- Endpoint: indices.get_mapping /** - * Get mapping definitions. Retrieves mapping definitions for one or more - * indices. 
For data streams, the API retrieves mappings for the stream’s - * backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the + * stream’s backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html">Documentation * on elastic.co */ @@ -1517,15 +1897,14 @@ public GetMappingResponse getMapping(GetMappingRequest request) throws IOExcepti } /** - * Get mapping definitions. Retrieves mapping definitions for one or more - * indices. For data streams, the API retrieves mappings for the stream’s - * backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the + * stream’s backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetMappingRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html">Documentation * on elastic.co */ @@ -1535,12 +1914,11 @@ public final GetMappingResponse getMapping(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html">Documentation * on elastic.co */ @@ -1552,11 +1930,11 @@ public GetMappingResponse getMapping() throws IOException, ElasticsearchExceptio // ----- Endpoint: indices.get_settings /** - * Get index settings. Returns setting information for one or more indices. For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html">Documentation * on elastic.co */ @@ -1569,14 +1947,14 @@ public GetIndicesSettingsResponse getSettings(GetIndicesSettingsRequest request) } /** - * Get index settings. Returns setting information for one or more indices. 
For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. * * @param fn * a function that initializes a builder to create the * {@link GetIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html">Documentation * on elastic.co */ @@ -1587,11 +1965,11 @@ public final GetIndicesSettingsResponse getSettings( } /** - * Get index settings. Returns setting information for one or more indices. For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html">Documentation * on elastic.co */ @@ -1603,10 +1981,14 @@ public GetIndicesSettingsResponse getSettings() throws IOException, Elasticsearc // ----- Endpoint: indices.get_template /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html">Documentation * on elastic.co */ @@ -1618,13 +2000,17 @@ public GetTemplateResponse getTemplate(GetTemplateRequest request) throws IOExce } /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @param fn * a function that initializes a builder to create the * {@link GetTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html">Documentation * on elastic.co */ @@ -1635,10 +2021,14 @@ public final GetTemplateResponse getTemplate( } /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html">Documentation * on elastic.co */ @@ -1738,11 +2128,42 @@ public final ModifyDataStreamResponse modifyDataStream( // ----- Endpoint: indices.open /** - * Opens a closed index. For data streams, the API opens any closed backing + * Open a closed index. For data streams, the API opens any closed backing * indices. + *

    + * A closed index is blocked for read/write operations and does not allow all + * operations that opened indices allow. It is not possible to index documents + * or to search for documents in a closed index. This allows closed indices to + * not have to maintain internal data structures for indexing or searching + * documents, resulting in a smaller overhead on the cluster. + *

    + * When opening or closing an index, the master is responsible for restarting + * the index shards to reflect the new state of the index. The shards will then + * go through the normal recovery process. The data of opened or closed indices + * is automatically replicated by the cluster to ensure that enough shard copies + * are safely kept around at all times. + *

    + * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off by + * using the ignore_unavailable=true parameter. + *

    + * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change the + * action.destructive_requires_name setting to false. + * This setting can also be changed with the cluster update settings API. + *

    + * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. + *

    + * Because opening or closing an index allocates its shards, the + * wait_for_active_shards setting on index creation applies to the + * _open and _close index actions as well. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html">Documentation * on elastic.co */ @@ -1754,14 +2175,45 @@ public OpenResponse open(OpenRequest request) throws IOException, ElasticsearchE } /** - * Opens a closed index. For data streams, the API opens any closed backing + * Open a closed index. For data streams, the API opens any closed backing * indices. + *

    + * A closed index is blocked for read/write operations and does not allow all + * operations that opened indices allow. It is not possible to index documents + * or to search for documents in a closed index. This allows closed indices to + * not have to maintain internal data structures for indexing or searching + * documents, resulting in a smaller overhead on the cluster. + *

    + * When opening or closing an index, the master is responsible for restarting + * the index shards to reflect the new state of the index. The shards will then + * go through the normal recovery process. The data of opened or closed indices + * is automatically replicated by the cluster to ensure that enough shard copies + * are safely kept around at all times. + *

    + * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off by + * using the ignore_unavailable=true parameter. + *

    + * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change the + * action.destructive_requires_name setting to false. + * This setting can also be changed with the cluster update settings API. + *

    + * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. + *

    + * Because opening or closing an index allocates its shards, the + * wait_for_active_shards setting on index creation applies to the + * _open and _close index actions as well. * * @param fn * a function that initializes a builder to create the * {@link OpenRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html">Documentation * on elastic.co */ @@ -1910,6 +2362,45 @@ public final PutDataLifecycleResponse putDataLifecycle( /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + *

    + * Elasticsearch applies templates to new indices based on a wildcard pattern + * that matches the index name. Index templates are applied during data stream + * or index creation. For data streams, these settings and mappings are applied + * when the stream's backing indices are created. Settings and mappings + * specified in a create index API request override any settings or mappings + * specified in an index template. Changes to index templates do not affect + * existing indices, including the existing backing indices of a data stream. + *

    + * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

    + * Multiple matching templates + *

    + * If multiple index templates match the name of a new index or data stream, the + * template with the highest priority is used. + *

    + * Multiple templates with overlapping index patterns at the same priority are + * not allowed and an error will be thrown when attempting to create a template + * matching an existing index template at identical priorities. + *

    + * Composing aliases, mappings, and settings + *

    + * When multiple component templates are specified in the + * composed_of field for an index template, they are merged in the + * order specified, meaning that later component templates override earlier + * component templates. Any mappings, settings, or aliases from the parent index + * template are merged in next. Finally, any configuration on the index request + * itself is merged. Mapping definitions are merged recursively, which means + * that later mapping components can introduce new field mappings and update the + * mapping configuration. If a field mapping is already contained in an earlier + * component, its definition will be completely overwritten by the later one. + * This recursive merging strategy applies not only to field mappings, but also + * root options like dynamic_templates and meta. If an + * earlier component contains a dynamic_templates block, then by + * default new dynamic_templates entries are appended onto the end. + * If an entry already exists with the same key, then it is overwritten by the + * new definition. * * @see Documentation @@ -1927,6 +2418,45 @@ public PutIndexTemplateResponse putIndexTemplate(PutIndexTemplateRequest request /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + *

    + * Elasticsearch applies templates to new indices based on a wildcard pattern + * that matches the index name. Index templates are applied during data stream + * or index creation. For data streams, these settings and mappings are applied + * when the stream's backing indices are created. Settings and mappings + * specified in a create index API request override any settings or mappings + * specified in an index template. Changes to index templates do not affect + * existing indices, including the existing backing indices of a data stream. + *

    + * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

    + * Multiple matching templates + *

    + * If multiple index templates match the name of a new index or data stream, the + * template with the highest priority is used. + *

    + * Multiple templates with overlapping index patterns at the same priority are + * not allowed and an error will be thrown when attempting to create a template + * matching an existing index template at identical priorities. + *

    + * Composing aliases, mappings, and settings + *

    + * When multiple component templates are specified in the + * composed_of field for an index template, they are merged in the + * order specified, meaning that later component templates override earlier + * component templates. Any mappings, settings, or aliases from the parent index + * template are merged in next. Finally, any configuration on the index request + * itself is merged. Mapping definitions are merged recursively, which means + * that later mapping components can introduce new field mappings and update the + * mapping configuration. If a field mapping is already contained in an earlier + * component, its definition will be completely overwritten by the later one. + * This recursive merging strategy applies not only to field mappings, but also + * root options like dynamic_templates and meta. If an + * earlier component contains a dynamic_templates block, then by + * default new dynamic_templates entries are appended onto the end. + * If an entry already exists with the same key, then it is overwritten by the + * new definition. * * @param fn * a function that initializes a builder to create the @@ -1945,13 +2475,44 @@ public final PutIndexTemplateResponse putIndexTemplate( // ----- Endpoint: indices.put_mapping /** - * Update field mappings. Adds new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields. - * For data streams, these changes are applied to all backing indices by - * default. + * Update field mappings. Add new fields to an existing data stream or index. + * You can also use this API to change the search settings of existing fields + * and add new properties to existing object fields. For data streams, these + * changes are applied to all backing indices by default. + *

    + * Add multi-fields to an existing field + *

    + * Multi-fields let you index the same field in different ways. You can use this + * API to update the fields mapping parameter and enable multi-fields for an + * existing field. WARNING: If an index (or data stream) contains documents when + * you add a multi-field, those documents will not have values for the new + * multi-field. You can populate the new multi-field with the update by query + * API. + *

    + * Change supported mapping parameters for an existing field + *

    + * The documentation for each mapping parameter indicates whether you can update + * it for an existing field using this API. For example, you can use the update + * mapping API to update the ignore_above parameter. + *

    + * Change the mapping of an existing field + *

    + * Except for supported mapping parameters, you can't change the mapping or + * field type of an existing field. Changing an existing field could invalidate + * data that's already indexed. + *

    + * If you need to change the mapping of a field in a data stream's backing + * indices, refer to documentation about modifying data streams. If you need to + * change the mapping of a field in other indices, create a new index with the + * correct mapping and reindex your data into that index. + *

    + * Rename a field + *

    + * Renaming a field would invalidate data already indexed under the old field + * name. Instead, add an alias field to create an alternate field name. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html">Documentation * on elastic.co */ @@ -1963,16 +2524,47 @@ public PutMappingResponse putMapping(PutMappingRequest request) throws IOExcepti } /** - * Update field mappings. Adds new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields. - * For data streams, these changes are applied to all backing indices by - * default. + * Update field mappings. Add new fields to an existing data stream or index. + * You can also use this API to change the search settings of existing fields + * and add new properties to existing object fields. For data streams, these + * changes are applied to all backing indices by default. + *

    + * Add multi-fields to an existing field + *

    + * Multi-fields let you index the same field in different ways. You can use this + * API to update the fields mapping parameter and enable multi-fields for an + * existing field. WARNING: If an index (or data stream) contains documents when + * you add a multi-field, those documents will not have values for the new + * multi-field. You can populate the new multi-field with the update by query + * API. + *

    + * Change supported mapping parameters for an existing field + *

    + * The documentation for each mapping parameter indicates whether you can update + * it for an existing field using this API. For example, you can use the update + * mapping API to update the ignore_above parameter. + *

    + * Change the mapping of an existing field + *

    + * Except for supported mapping parameters, you can't change the mapping or + * field type of an existing field. Changing an existing field could invalidate + * data that's already indexed. + *

    + * If you need to change the mapping of a field in a data stream's backing + * indices, refer to documentation about modifying data streams. If you need to + * change the mapping of a field in other indices, create a new index with the + * correct mapping and reindex your data into that index. + *

    + * Rename a field + *

    + * Renaming a field would invalidate data already indexed under the old field + * name. Instead, add an alias field to create an alternate field name. * * @param fn * a function that initializes a builder to create the * {@link PutMappingRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html">Documentation * on elastic.co */ @@ -1986,9 +2578,26 @@ public final PutMappingResponse putMapping(Function + * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

    + * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html">Documentation * on elastic.co */ @@ -2003,12 +2612,29 @@ public PutIndicesSettingsResponse putSettings(PutIndicesSettingsRequest request) /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. + *

    + * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

    + * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @param fn * a function that initializes a builder to create the * {@link PutIndicesSettingsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html">Documentation * on elastic.co */ @@ -2021,9 +2647,26 @@ public final PutIndicesSettingsResponse putSettings( /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. + *

    + * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

    + * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html">Documentation * on elastic.co */ @@ -2052,9 +2695,22 @@ public PutIndicesSettingsResponse putSettings() throws IOException, Elasticsearc * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. + *

    + * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

    + * Indices matching multiple templates + *

    + * Multiple index templates can potentially match an index, in this case, both + * the settings and mappings are merged into the final configuration of the + * index. The order of the merging can be controlled using the order parameter, + * with lower order being applied first, and higher orders overriding them. + * NOTE: Multiple matching templates with the same order value will result in a + * non-deterministic merging order. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html">Documentation * on elastic.co */ @@ -2083,12 +2739,25 @@ public PutTemplateResponse putTemplate(PutTemplateRequest request) throws IOExce * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. + *

    + * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

    + * Indices matching multiple templates + *

    + * Multiple index templates can potentially match an index, in this case, both + * the settings and mappings are merged into the final configuration of the + * index. The order of the merging can be controlled using the order parameter, + * with lower order being applied first, and higher orders overriding them. + * NOTE: Multiple matching templates with the same order value will result in a + * non-deterministic merging order. * * @param fn * a function that initializes a builder to create the * {@link PutTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html">Documentation * on elastic.co */ @@ -2105,6 +2774,9 @@ public final PutTemplateResponse putTemplate( * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

    + * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

    * Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for @@ -2134,7 +2806,7 @@ public final PutTemplateResponse putTemplate( * the recovery API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html">Documentation * on elastic.co */ @@ -2150,6 +2822,9 @@ public RecoveryResponse recovery(RecoveryRequest request) throws IOException, El * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

    + * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

    * Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for @@ -2182,7 +2857,7 @@ public RecoveryResponse recovery(RecoveryRequest request) throws IOException, El * a function that initializes a builder to create the * {@link RecoveryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html">Documentation * on elastic.co */ @@ -2196,6 +2871,9 @@ public final RecoveryResponse recovery(Function + * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

    * Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for @@ -2225,7 +2903,7 @@ public final RecoveryResponse recovery(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html">Documentation * on elastic.co */ @@ -2240,9 +2918,26 @@ public RecoveryResponse recovery() throws IOException, ElasticsearchException { * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. + *

    + * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

    + * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

    + * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

    + * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html">Documentation * on elastic.co */ @@ -2257,12 +2952,29 @@ public RefreshResponse refresh(RefreshRequest request) throws IOException, Elast * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. + *

    + * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

    + * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

    + * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

    + * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @param fn * a function that initializes a builder to create the * {@link RefreshRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html">Documentation * on elastic.co */ @@ -2275,9 +2987,26 @@ public final RefreshResponse refresh(Function + * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

    + * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

    + * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

    + * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html">Documentation * on elastic.co */ @@ -2313,7 +3042,7 @@ public RefreshResponse refresh() throws IOException, ElasticsearchException { * in case shards are relocated in the future. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html">Documentation * on elastic.co */ @@ -2353,7 +3082,7 @@ public ReloadSearchAnalyzersResponse reloadSearchAnalyzers(ReloadSearchAnalyzers * a function that initializes a builder to create the * {@link ReloadSearchAnalyzersRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html">Documentation * on elastic.co */ @@ -2392,9 +3121,39 @@ public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers( *

  • Cluster version information, including the Elasticsearch server * version.
  • * + *

    + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

    + * Advantages of using this endpoint before a cross-cluster + * search + *

    + * You may want to exclude a cluster or index from a search when: + *

      + *
    • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
    • + *
    • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
    • + *
    • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
    • + *
    • A remote cluster is an older version that does not support the feature + * you want to use in your search.
    • + *
    * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html">Documentation * on elastic.co */ @@ -2433,12 +3192,42 @@ public ResolveClusterResponse resolveCluster(ResolveClusterRequest request) *
  • Cluster version information, including the Elasticsearch server * version.
  • * + *

    + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

    + * Advantages of using this endpoint before a cross-cluster + * search + *

    + * You may want to exclude a cluster or index from a search when: + *

      + *
    • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
    • + *
    • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
    • + *
    • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
    • + *
    • A remote cluster is an older version that does not support the feature + * you want to use in your search.
    • + *
    * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html">Documentation * on elastic.co */ @@ -2456,7 +3245,7 @@ public final ResolveClusterResponse resolveCluster( * supported. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-index-api.html">Documentation * on elastic.co */ @@ -2476,7 +3265,7 @@ public ResolveIndexResponse resolveIndex(ResolveIndexRequest request) throws IOE * a function that initializes a builder to create the * {@link ResolveIndexRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-index-api.html">Documentation * on elastic.co */ @@ -2489,8 +3278,57 @@ public final ResolveIndexResponse resolveIndex( // ----- Endpoint: indices.rollover /** - * Roll over to a new index. Creates a new index for a data stream or index + * Roll over to a new index. TIP: It is recommended to use the index lifecycle + * rollover action to automate rollovers. + *

    + * The rollover API creates a new index for a data stream or index alias. The + * API behavior depends on the rollover target. + *

    + * Roll over a data stream + *

    + * If you roll over a data stream, the API creates a new write index for the + * stream. The stream's previous write index becomes a regular backing index. A + * rollover also increments the data stream's generation. + *

    + * Roll over an index alias with a write index + *

    + * TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a + * write index to manage time series data. Data streams replace this + * functionality, require less maintenance, and automatically integrate with + * data tiers. + *

    + * If an index alias points to multiple indices, one of the indices must be a + * write index. The rollover API creates a new write index for the alias with + * is_write_index set to true. The API also + * sets is_write_index to false for the previous write + * index. + *

    + * Roll over an index alias with one index + *

    + * If you roll over an index alias that points to only one index, the API + * creates a new index for the alias and removes the original index from the * alias. + *

    + * NOTE: A rollover creates a new index and is subject to the + * wait_for_active_shards setting. + *

    + * Increment index names for an alias + *

    + * When you roll over an index alias, you can specify a name for the new index. + * If you don't specify a name and the current index ends with - + * and a number, such as my-index-000001 or + * my-index-3, the new index name increments that number. For + * example, if you roll over an alias with a current index of + * my-index-000001, the rollover creates a new index named + * my-index-000002. This number is always six characters and + * zero-padded, regardless of the previous index's name. + *

    + * If you use an index alias for time series data, you can use date math in the + * index name to track the rollover date. For example, you can create an alias + * that points to an index named <my-index-{now/d}-000001>. + * If you create the index on May 6, 2099, the index's name is + * my-index-2099.05.06-000001. If you roll over the alias on May 7, + * 2099, the new index's name is my-index-2099.05.07-000002. * * @see Documentation @@ -2505,8 +3343,57 @@ public RolloverResponse rollover(RolloverRequest request) throws IOException, El } /** - * Roll over to a new index. Creates a new index for a data stream or index + * Roll over to a new index. TIP: It is recommended to use the index lifecycle + * rollover action to automate rollovers. + *

    + * The rollover API creates a new index for a data stream or index alias. The + * API behavior depends on the rollover target. + *

    + * Roll over a data stream + *

    + * If you roll over a data stream, the API creates a new write index for the + * stream. The stream's previous write index becomes a regular backing index. A + * rollover also increments the data stream's generation. + *

    + * Roll over an index alias with a write index + *

    + * TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a + * write index to manage time series data. Data streams replace this + * functionality, require less maintenance, and automatically integrate with + * data tiers. + *

    + * If an index alias points to multiple indices, one of the indices must be a + * write index. The rollover API creates a new write index for the alias with + * is_write_index set to true. The API also + * sets is_write_index to false for the previous write + * index. + *

    + * Roll over an index alias with one index + *

    + * If you roll over an index alias that points to only one index, the API + * creates a new index for the alias and removes the original index from the * alias. + *

    + * NOTE: A rollover creates a new index and is subject to the + * wait_for_active_shards setting. + *

    + * Increment index names for an alias + *

    + * When you roll over an index alias, you can specify a name for the new index. + * If you don't specify a name and the current index ends with - + * and a number, such as my-index-000001 or + * my-index-3, the new index name increments that number. For + * example, if you roll over an alias with a current index of + * my-index-000001, the rollover creates a new index named + * my-index-000002. This number is always six characters and + * zero-padded, regardless of the previous index's name. + *

    + * If you use an index alias for time series data, you can use date math in the + * index name to track the rollover date. For example, you can create an alias + * that points to an index named <my-index-{now/d}-000001>. + * If you create the index on May 6, 2099, the index's name is + * my-index-2099.05.06-000001. If you roll over the alias on May 7, + * 2099, the new index's name is my-index-2099.05.07-000002. * * @param fn * a function that initializes a builder to create the @@ -2529,7 +3416,7 @@ public final RolloverResponse rollover(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html">Documentation * on elastic.co */ @@ -2549,7 +3436,7 @@ public SegmentsResponse segments(SegmentsRequest request) throws IOException, El * a function that initializes a builder to create the * {@link SegmentsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html">Documentation * on elastic.co */ @@ -2564,7 +3451,7 @@ public final SegmentsResponse segments(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html">Documentation * on elastic.co */ @@ -2593,7 +3480,7 @@ public SegmentsResponse segments() throws IOException, ElasticsearchException { * are unassigned or have one or more unassigned replica shards. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html">Documentation * on elastic.co */ @@ -2625,7 +3512,7 @@ public ShardStoresResponse shardStores(ShardStoresRequest request) throws IOExce * a function that initializes a builder to create the * {@link ShardStoresRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html">Documentation * on elastic.co */ @@ -2653,7 +3540,7 @@ public final ShardStoresResponse shardStores( * are unassigned or have one or more unassigned replica shards. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html">Documentation * on elastic.co */ @@ -2806,11 +3693,11 @@ public final ShrinkResponse shrink(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/{master}/indices-simulate-index.html">Documentation * on elastic.co */ @@ -2823,14 +3710,14 @@ public SimulateIndexTemplateResponse simulateIndexTemplate(SimulateIndexTemplate } /** - * Simulate an index. Returns the index configuration that would be applied to - * the specified index from an existing index template. + * Simulate an index. Get the index configuration that would be applied to the + * specified index from an existing index template. * * @param fn * a function that initializes a builder to create the * {@link SimulateIndexTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/{master}/indices-simulate-index.html">Documentation * on elastic.co */ @@ -2843,11 +3730,11 @@ public final SimulateIndexTemplateResponse simulateIndexTemplate( // ----- Endpoint: indices.simulate_template /** - * Simulate an index template. Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied + * by a particular index template. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html">Documentation * on elastic.co */ @@ -2860,14 +3747,14 @@ public SimulateTemplateResponse simulateTemplate(SimulateTemplateRequest request } /** - * Simulate an index template. Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied + * by a particular index template. * * @param fn * a function that initializes a builder to create the * {@link SimulateTemplateRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html">Documentation * on elastic.co */ @@ -2878,11 +3765,11 @@ public final SimulateTemplateResponse simulateTemplate( } /** - * Simulate an index template. Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. Get the index configuration that would be applied + * by a particular index template. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html">Documentation * on elastic.co */ @@ -2913,6 +3800,18 @@ public SimulateTemplateResponse simulateTemplate() throws IOException, Elasticse * * *

    + * You can make an index read-only with the following request using the add + * index block API: + * + *

    +	 * PUT /my_source_index/_block/write
    +	 * 
    +	 * 
    + *

    + * The current write index on a data stream cannot be split. In order to split + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be split. + *

    * The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing @@ -2978,6 +3877,18 @@ public SplitResponse split(SplitRequest request) throws IOException, Elasticsear * * *

    + * You can make an index read-only with the following request using the add + * index block API: + * + *

    +	 * PUT /my_source_index/_block/write
    +	 * 
    +	 * 
    + *

    + * The current write index on a data stream cannot be split. In order to split + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be split. + *

    * The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing @@ -3044,7 +3955,7 @@ public final SplitResponse split(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html">Documentation * on elastic.co */ @@ -3076,7 +3987,7 @@ public IndicesStatsResponse stats(IndicesStatsRequest request) throws IOExceptio * a function that initializes a builder to create the * {@link IndicesStatsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html">Documentation * on elastic.co */ @@ -3104,7 +4015,7 @@ public final IndicesStatsResponse stats( * any node-level statistics to which the shard contributed. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html">Documentation * on elastic.co */ @@ -3120,7 +4031,7 @@ public IndicesStatsResponse stats() throws IOException, ElasticsearchException { * the normal recovery process and becomes writeable again. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html">Documentation * on elastic.co */ @@ -3139,7 +4050,7 @@ public UnfreezeResponse unfreeze(UnfreezeRequest request) throws IOException, El * a function that initializes a builder to create the * {@link UnfreezeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java index 1e11e63ea..57b1ed678 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsRequest.java @@ -61,7 +61,7 @@ // typedef: indices.exists.Request /** - * Check indices. Checks if one or more indices, index aliases, or data streams + * Check indices. Check if one or more indices, index aliases, or data streams * exist. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsTemplateRequest.java index c51a68685..20a524acf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsTemplateRequest.java @@ -61,8 +61,13 @@ // typedef: indices.exists_template.Request /** - * Check existence of index templates. Returns information about whether a - * particular index template exists. + * Check existence of index templates. Get information about whether index + * templates exist. Index templates define settings, mappings, and aliases that + * can be applied automatically to new indices. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see API * specification @@ -96,7 +101,7 @@ public static ExistsTemplateRequest of(Function * API name: {@code flat_settings} */ @@ -106,8 +111,7 @@ public final Boolean flatSettings() { } /** - * Return local information, do not retrieve the state from master node - * (default: false) + * Indicates whether to get information from the local node only. *

    * API name: {@code local} */ @@ -117,7 +121,9 @@ public final Boolean local() { } /** - * Explicit operation timeout for connection to master node + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. To + * indicate that the request should never timeout, set it to -1. *

    * API name: {@code master_timeout} */ @@ -127,7 +133,8 @@ public final Time masterTimeout() { } /** - * Required - The comma separated names of the index templates + * Required - A comma-separated list of index template names used to limit the + * request. Wildcard (*) expressions are supported. *

    * API name: {@code name} */ @@ -156,7 +163,7 @@ public static class Builder extends RequestBase.AbstractBuilder private List name; /** - * Return settings in flat format (default: false) + * Indicates whether to use a flat format for the response. *

    * API name: {@code flat_settings} */ @@ -166,8 +173,7 @@ public final Builder flatSettings(@Nullable Boolean value) { } /** - * Return local information, do not retrieve the state from master node - * (default: false) + * Indicates whether to get information from the local node only. *

    * API name: {@code local} */ @@ -177,7 +183,9 @@ public final Builder local(@Nullable Boolean value) { } /** - * Explicit operation timeout for connection to master node + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. To + * indicate that the request should never timeout, set it to -1. *

    * API name: {@code master_timeout} */ @@ -187,7 +195,9 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Explicit operation timeout for connection to master node + * The period to wait for the master node. If the master node is not available + * before the timeout expires, the request fails and returns an error. To + * indicate that the request should never timeout, set it to -1. *

    * API name: {@code master_timeout} */ @@ -196,7 +206,8 @@ public final Builder masterTimeout(Function> f } /** - * Required - The comma separated names of the index templates + * Required - A comma-separated list of index template names used to limit the + * request. Wildcard (*) expressions are supported. *

    * API name: {@code name} *

    @@ -208,7 +219,8 @@ public final Builder name(List list) { } /** - * Required - The comma separated names of the index templates + * Required - A comma-separated list of index template names used to limit the + * request. Wildcard (*) expressions are supported. *

    * API name: {@code name} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java index 9a5822f96..c041a2f05 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java @@ -66,6 +66,11 @@ * are running on a cluster. A shard-level search request that accesses a given * field, even if multiple times during that request, is counted as a single * use. + *

    + * The response body reports the per-shard usage count of the data structures + * that back the fields in the index. A given request will increment each count + * by a maximum value of 1, even if the request accesses the same field multiple + * times. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java index 81db9161d..c1123432d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java @@ -79,7 +79,61 @@ * rapidly, resulting in higher disk usage and worse search performance. If you * regularly force merge an index receiving writes, this can also make snapshots * more expensive, since the new documents can't be backed up incrementally. + *

    + * Blocks during a force merge + *

    + * Calls to this API block until the merge is complete (unless request contains + * wait_for_completion=false). If the client connection is lost + * before completion then the force merge process will continue in the + * background. Any new requests to force merge the same indices will also block + * until the ongoing force merge is complete. + *

    + * Running force merge asynchronously + *

    + * If the request contains wait_for_completion=false, Elasticsearch + * performs some preflight checks, launches the request, and returns a task you + * can use to get the status of the task. However, you can not cancel this task + * as the force merge task is not cancelable. Elasticsearch creates a record of + * this task as a document at _tasks/<task_id>. When you are + * done with a task, you should delete the task document so Elasticsearch can + * reclaim the space. + *

    + * Force merging multiple indices + *

    + * You can force merge multiple indices with a single request by targeting: + *

    + *

    + * Each targeted shard is force-merged separately using the force_merge + * threadpool. By default each node only has a single force_merge + * thread which means that the shards on that node are force-merged one at a + * time. If you expand the force_merge threadpool on a node then it + * will force merge its shards in parallel. + *

    + * Force merge makes the storage for the shard being merged temporarily + * increase, as it may require free space up to triple its size in case + * max_num_segments parameter is set to 1, to rewrite + * all segments into a new one. + *

    + * Data streams and time-based indices + *

    + * Force-merging is useful for managing a data stream's older backing indices + * and other time-based indices, particularly after a rollover. In these cases, + * each index only receives indexing traffic for a certain period of time. Once + * an index receives no more writes, its shards can be force-merged to a single + * segment. This can be a good idea because single-segment shards can sometimes + * use simpler and more efficient data structures to perform searches. For + * example: * + *

    + * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
    + * 
    + * 
    + * * @see
    API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleStatsRequest.java new file mode 100644 index 000000000..ecdf002dd --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleStatsRequest.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Collections; +import java.util.Objects; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. 
MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: indices.get_data_lifecycle_stats.Request + +/** + * Get data stream lifecycle stats. Get statistics about the data streams that + * are managed by a data stream lifecycle. + * + * @see API + * specification + */ + +public class GetDataLifecycleStatsRequest extends RequestBase { + public GetDataLifecycleStatsRequest() { + } + + /** + * Singleton instance for {@link GetDataLifecycleStatsRequest}. + */ + public static final GetDataLifecycleStatsRequest _INSTANCE = new GetDataLifecycleStatsRequest(); + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code indices.get_data_lifecycle_stats}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/indices.get_data_lifecycle_stats", + + // Request method + request -> { + return "GET"; + + }, + + // Request path + request -> { + return "/_lifecycle/stats"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), false, GetDataLifecycleStatsResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleStatsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleStatsResponse.java new file mode 100644 index 000000000..603c06cb1 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetDataLifecycleStatsResponse.java @@ -0,0 +1,303 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.indices; + +import co.elastic.clients.elasticsearch.indices.get_data_lifecycle_stats.DataStreamStats; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.Long; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.get_data_lifecycle_stats.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class GetDataLifecycleStatsResponse implements JsonpSerializable { + private final int dataStreamCount; + + private final List dataStreams; + + @Nullable + private final Long lastRunDurationInMillis; + + @Nullable + private final Long timeBetweenStartsInMillis; + + // --------------------------------------------------------------------------------------------- + + private GetDataLifecycleStatsResponse(Builder builder) { + + this.dataStreamCount = ApiTypeHelper.requireNonNull(builder.dataStreamCount, this, "dataStreamCount"); + this.dataStreams = ApiTypeHelper.unmodifiableRequired(builder.dataStreams, this, "dataStreams"); + this.lastRunDurationInMillis = builder.lastRunDurationInMillis; + this.timeBetweenStartsInMillis = builder.timeBetweenStartsInMillis; + + } + + public static GetDataLifecycleStatsResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The count of data streams currently being managed by the data + * stream lifecycle. + *

    + * API name: {@code data_stream_count} + */ + public final int dataStreamCount() { + return this.dataStreamCount; + } + + /** + * Required - Information about the data streams that are managed by the data + * stream lifecycle. + *

    + * API name: {@code data_streams} + */ + public final List dataStreams() { + return this.dataStreams; + } + + /** + * The duration of the last data stream lifecycle execution. + *

    + * API name: {@code last_run_duration_in_millis} + */ + @Nullable + public final Long lastRunDurationInMillis() { + return this.lastRunDurationInMillis; + } + + /** + * The time that passed between the start of the last two data stream lifecycle + * executions. This value should amount approximately to + * data_streams.lifecycle.poll_interval. + *

    + * API name: {@code time_between_starts_in_millis} + */ + @Nullable + public final Long timeBetweenStartsInMillis() { + return this.timeBetweenStartsInMillis; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("data_stream_count"); + generator.write(this.dataStreamCount); + + if (ApiTypeHelper.isDefined(this.dataStreams)) { + generator.writeKey("data_streams"); + generator.writeStartArray(); + for (DataStreamStats item0 : this.dataStreams) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (this.lastRunDurationInMillis != null) { + generator.writeKey("last_run_duration_in_millis"); + generator.write(this.lastRunDurationInMillis); + + } + if (this.timeBetweenStartsInMillis != null) { + generator.writeKey("time_between_starts_in_millis"); + generator.write(this.timeBetweenStartsInMillis); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetDataLifecycleStatsResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private Integer dataStreamCount; + + private List dataStreams; + + @Nullable + private Long lastRunDurationInMillis; + + @Nullable + private Long timeBetweenStartsInMillis; + + /** + * Required - The count of data streams currently being managed by the data + * stream lifecycle. + *

    + * API name: {@code data_stream_count} + */ + public final Builder dataStreamCount(int value) { + this.dataStreamCount = value; + return this; + } + + /** + * Required - Information about the data streams that are managed by the data + * stream lifecycle. + *

    + * API name: {@code data_streams} + *

    + * Adds all elements of list to dataStreams. + */ + public final Builder dataStreams(List list) { + this.dataStreams = _listAddAll(this.dataStreams, list); + return this; + } + + /** + * Required - Information about the data streams that are managed by the data + * stream lifecycle. + *

    + * API name: {@code data_streams} + *

    + * Adds one or more values to dataStreams. + */ + public final Builder dataStreams(DataStreamStats value, DataStreamStats... values) { + this.dataStreams = _listAdd(this.dataStreams, value, values); + return this; + } + + /** + * Required - Information about the data streams that are managed by the data + * stream lifecycle. + *

    + * API name: {@code data_streams} + *

    + * Adds a value to dataStreams using a builder lambda. + */ + public final Builder dataStreams(Function> fn) { + return dataStreams(fn.apply(new DataStreamStats.Builder()).build()); + } + + /** + * The duration of the last data stream lifecycle execution. + *

    + * API name: {@code last_run_duration_in_millis} + */ + public final Builder lastRunDurationInMillis(@Nullable Long value) { + this.lastRunDurationInMillis = value; + return this; + } + + /** + * The time that passed between the start of the last two data stream lifecycle + * executions. This value should amount approximately to + * data_streams.lifecycle.poll_interval. + *

    + * API name: {@code time_between_starts_in_millis} + */ + public final Builder timeBetweenStartsInMillis(@Nullable Long value) { + this.timeBetweenStartsInMillis = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetDataLifecycleStatsResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetDataLifecycleStatsResponse build() { + _checkSingleUse(); + + return new GetDataLifecycleStatsResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link GetDataLifecycleStatsResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, GetDataLifecycleStatsResponse::setupGetDataLifecycleStatsResponseDeserializer); + + protected static void setupGetDataLifecycleStatsResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::dataStreamCount, JsonpDeserializer.integerDeserializer(), "data_stream_count"); + op.add(Builder::dataStreams, JsonpDeserializer.arrayDeserializer(DataStreamStats._DESERIALIZER), + "data_streams"); + op.add(Builder::lastRunDurationInMillis, JsonpDeserializer.longDeserializer(), "last_run_duration_in_millis"); + op.add(Builder::timeBetweenStartsInMillis, JsonpDeserializer.longDeserializer(), + "time_between_starts_in_millis"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java index 7c3a3c52d..ff9755e24 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetFieldMappingRequest.java @@ -62,6 +62,9 @@ * Get mapping definitions. Retrieves mapping definitions for one or more * fields. 
For data streams, the API retrieves field mappings for the stream’s * backing indices. + *

    + * This API is useful if you don't need a complete mapping or if an index + * mapping contains a large number of fields. * * @see API @@ -134,7 +137,7 @@ public final List expandWildcards() { /** * Required - Comma-separated list or wildcard expression of fields used to - * limit returned information. + * limit returned information. Supports wildcards (*). *

    * API name: {@code fields} */ @@ -263,7 +266,7 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val /** * Required - Comma-separated list or wildcard expression of fields used to - * limit returned information. + * limit returned information. Supports wildcards (*). *

    * API name: {@code fields} *

    @@ -276,7 +279,7 @@ public final Builder fields(List list) { /** * Required - Comma-separated list or wildcard expression of fields used to - * limit returned information. + * limit returned information. Supports wildcards (*). *

    * API name: {@code fields} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexRequest.java index f25673cc9..570c38435 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexRequest.java @@ -61,8 +61,8 @@ // typedef: indices.get.Request /** - * Get index information. Returns information about one or more indices. For - * data streams, the API returns information about the stream’s backing indices. + * Get index information. Get information about one or more indices. For data + * streams, the API returns information about the stream’s backing indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexTemplateRequest.java index 88d77c6fb..9346aebe1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndexTemplateRequest.java @@ -56,7 +56,7 @@ // typedef: indices.get_index_template.Request /** - * Get index templates. Returns information about one or more index templates. + * Get index templates. Get information about one or more index templates. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndicesSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndicesSettingsRequest.java index b24881456..adbb47461 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndicesSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetIndicesSettingsRequest.java @@ -60,8 +60,8 @@ // typedef: indices.get_settings.Request /** - * Get index settings. Returns setting information for one or more indices. For - * data streams, returns setting information for the stream’s backing indices. + * Get index settings. Get setting information for one or more indices. For data + * streams, it returns setting information for the stream's backing indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java index ea3bf519a..7d5a14bef 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetMappingRequest.java @@ -60,9 +60,8 @@ // typedef: indices.get_mapping.Request /** - * Get mapping definitions. Retrieves mapping definitions for one or more - * indices. For data streams, the API retrieves mappings for the stream’s - * backing indices. + * Get mapping definitions. For data streams, the API retrieves mappings for the + * stream’s backing indices. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetTemplateRequest.java index 554f46841..14e709082 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetTemplateRequest.java @@ -59,7 +59,11 @@ // typedef: indices.get_template.Request /** - * Get index templates. Retrieves information about one or more index templates. + * Get index templates. Get information about one or more index templates. + *

    + * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/OpenRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/OpenRequest.java index b4f63d70d..26ec2d8d0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/OpenRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/OpenRequest.java @@ -61,8 +61,39 @@ // typedef: indices.open.Request /** - * Opens a closed index. For data streams, the API opens any closed backing + * Open a closed index. For data streams, the API opens any closed backing * indices. + *

    + * A closed index is blocked for read/write operations and does not allow all + * operations that opened indices allow. It is not possible to index documents + * or to search for documents in a closed index. This allows closed indices to + * not have to maintain internal data structures for indexing or searching + * documents, resulting in a smaller overhead on the cluster. + *

    + * When opening or closing an index, the master is responsible for restarting + * the index shards to reflect the new state of the index. The shards will then + * go through the normal recovery process. The data of opened or closed indices + * is automatically replicated by the cluster to ensure that enough shard copies + * are safely kept around at all times. + *

    + * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off by + * using the ignore_unavailable=true parameter. + *

    + * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change the + * action.destructive_requires_name setting to false. + * This setting can also be changed with the cluster update settings API. + *

    + * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. + *

    + * Because opening or closing an index allocates its shards, the + * wait_for_active_shards setting on index creation applies to the + * _open and _close index actions as well. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndexTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndexTemplateRequest.java index f0ea33bcb..c5ac93143 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndexTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndexTemplateRequest.java @@ -65,6 +65,45 @@ /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + *

    + * Elasticsearch applies templates to new indices based on a wildcard pattern + * that matches the index name. Index templates are applied during data stream + * or index creation. For data streams, these settings and mappings are applied + * when the stream's backing indices are created. Settings and mappings + * specified in a create index API request override any settings or mappings + * specified in an index template. Changes to index templates do not affect + * existing indices, including the existing backing indices of a data stream. + *

    + * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

    + * Multiple matching templates + *

    + * If multiple index templates match the name of a new index or data stream, the + * template with the highest priority is used. + *

    + * Multiple templates with overlapping index patterns at the same priority are + * not allowed and an error will be thrown when attempting to create a template + * matching an existing index template at identical priorities. + *

    + * Composing aliases, mappings, and settings + *

    + * When multiple component templates are specified in the + * composed_of field for an index template, they are merged in the + * order specified, meaning that later component templates override earlier + * component templates. Any mappings, settings, or aliases from the parent index + * template are merged in next. Finally, any configuration on the index request + * itself is merged. Mapping definitions are merged recursively, which means + * that later mapping components can introduce new field mappings and update the + * mapping configuration. If a field mapping is already contained in an earlier + * component, its definition will be completely overwritten by the later one. + * This recursive merging strategy applies not only to field mappings, but also + * root options like dynamic_templates and meta. If an + * earlier component contains a dynamic_templates block, then by + * default new dynamic_templates entries are appended onto the end. + * If an entry already exists with the same key, then it is overwritten by the + * new definition. * * @see API @@ -135,8 +174,10 @@ public static PutIndexTemplateRequest of(Function * API name: {@code _meta} */ @@ -281,7 +322,9 @@ public final IndexTemplateMapping template() { /** * Version number used to manage index templates externally. This number is not - * automatically generated by Elasticsearch. + * automatically generated by Elasticsearch. External systems can use these + * version numbers to simplify template management. To unset a version, replace + * the template without specifying one. *

    * API name: {@code version} */ @@ -426,8 +469,10 @@ public static class Builder extends RequestBase.AbstractBuilder private Long version; /** - * Optional user metadata about the index template. May have any contents. This - * map is not automatically generated by Elasticsearch. + * Optional user metadata about the index template. It may have any contents. It + * is not automatically generated or used by Elasticsearch. This user-defined + * object is stored in the cluster state, so keeping it short is preferable. To + * unset the metadata, replace the template without specifying it. *

    * API name: {@code _meta} *

    @@ -439,8 +484,10 @@ public final Builder meta(Map map) { } /** - * Optional user metadata about the index template. May have any contents. This - * map is not automatically generated by Elasticsearch. + * Optional user metadata about the index template. It may have any contents. It + * is not automatically generated or used by Elasticsearch. This user-defined + * object is stored in the cluster state, so keeping it short is preferable. To + * unset the metadata, replace the template without specifying it. *

    * API name: {@code _meta} *

    @@ -670,7 +717,9 @@ public final Builder template(Function * API name: {@code version} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndicesSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndicesSettingsRequest.java index 860a3a0ff..17463f432 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndicesSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutIndicesSettingsRequest.java @@ -65,6 +65,23 @@ /** * Update index settings. Changes dynamic index settings in real time. For data * streams, index setting changes are applied to all backing indices by default. + *

    + * To revert a setting to the default value, use a null value. The list of + * per-index settings that can be updated dynamically on live indices can be + * found in index module documentation. To preserve existing settings from being + * updated, set the preserve_existing parameter to + * true. + *

    + * NOTE: You can only define new analyzers on closed indices. To add an + * analyzer, you must close the index, define the analyzer, and reopen the + * index. You cannot close the write index of a data stream. To update the + * analyzer for a data stream's write index and future backing indices, update + * the analyzer in the index template used by the stream. Then roll over the + * data stream to apply the new analyzer to the stream's write index and future + * backing indices. This affects searches and any new data added to the stream + * after the rollover. However, it does not affect the data stream's backing + * indices or their existing data. To change the analyzer for existing backing + * indices, you must create a new data stream and reindex your data into it. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutMappingRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutMappingRequest.java index f818dc02d..3724df6ad 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutMappingRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutMappingRequest.java @@ -70,10 +70,41 @@ // typedef: indices.put_mapping.Request /** - * Update field mappings. Adds new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields. - * For data streams, these changes are applied to all backing indices by - * default. + * Update field mappings. Add new fields to an existing data stream or index. + * You can also use this API to change the search settings of existing fields + * and add new properties to existing object fields. For data streams, these + * changes are applied to all backing indices by default. + *

    + * Add multi-fields to an existing field + *

    + * Multi-fields let you index the same field in different ways. You can use this + * API to update the fields mapping parameter and enable multi-fields for an + * existing field. WARNING: If an index (or data stream) contains documents when + * you add a multi-field, those documents will not have values for the new + * multi-field. You can populate the new multi-field with the update by query + * API. + *

    + * Change supported mapping parameters for an existing field + *

    + * The documentation for each mapping parameter indicates whether you can update + * it for an existing field using this API. For example, you can use the update + * mapping API to update the ignore_above parameter. + *

    + * Change the mapping of an existing field + *

    + * Except for supported mapping parameters, you can't change the mapping or + * field type of an existing field. Changing an existing field could invalidate + * data that's already indexed. + *

    + * If you need to change the mapping of a field in a data stream's backing + * indices, refer to documentation about modifying data streams. If you need to + * change the mapping of a field in other indices, create a new index with the + * correct mapping and reindex your data into that index. + *

    + * Rename a field + *

    + * Renaming a field would invalidate data already indexed under the old field + * name. Instead, add an alias field to create an alternate field name. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java index c62cb7bb2..1e6c6ec1d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java @@ -80,6 +80,19 @@ * templates do not affect existing indices. Settings and mappings specified in * create index API requests override any settings or mappings specified in an * index template. + *

    + * You can use C-style /* *\/ block comments in index templates. + * You can include comments anywhere in the request body, except before the + * opening curly bracket. + *

    + * Indices matching multiple templates + *

    + * Multiple index templates can potentially match an index, in this case, both + * the settings and mappings are merged into the final configuration of the + * index. The order of the merging can be controlled using the order parameter, + * with lower order being applied first, and higher orders overriding them. + * NOTE: Multiple matching templates with the same order value will result in a + * non-deterministic merging order. * * @see API * specification @@ -227,7 +240,8 @@ public final IndexSettings settings() { /** * Version number used to manage index templates externally. This number is not - * automatically generated by Elasticsearch. + * automatically generated by Elasticsearch. To unset a version, replace the + * template without specifying one. *

    * API name: {@code version} */ @@ -493,7 +507,8 @@ public final Builder settings(Function * API name: {@code version} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java index 80e7c436a..e9e59660f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java @@ -62,6 +62,9 @@ * shard recoveries for one or more indices. For data streams, the API returns * information for the stream's backing indices. *

    + * All recoveries, whether ongoing or complete, are kept in the cluster state + * and may be reported on at any time. + *

    * Shard recovery is the process of initializing a shard copy, such as restoring * a primary shard from a snapshot or creating a replica shard from a primary * shard. When a shard recovery completes, the recovered shard is available for diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RefreshRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RefreshRequest.java index 218b5a1c4..d5f430450 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RefreshRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RefreshRequest.java @@ -62,6 +62,23 @@ * Refresh an index. A refresh makes recent operations performed on one or more * indices available for search. For data streams, the API runs the refresh * operation on the stream’s backing indices. + *

    + * By default, Elasticsearch periodically refreshes indices every second, but + * only on indices that have received one search request or more in the last 30 + * seconds. You can change this default interval with the + * index.refresh_interval setting. + *

    + * Refresh requests are synchronous and do not return a response until the + * refresh operation completes. + *

    + * Refreshes are resource-intensive. To ensure good cluster performance, it's + * recommended to wait for Elasticsearch's periodic refresh rather than + * performing an explicit refresh when possible. + *

    + * If your application workflow indexes documents and then runs a search to + * retrieve the indexed document, it's recommended to use the index API's + * refresh=wait_for query parameter option. This option ensures the + * indexing operation waits for a periodic refresh before running the search. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java index b19c7a765..a499bf4bf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java @@ -85,6 +85,36 @@ *

  • Cluster version information, including the Elasticsearch server * version.
  • * + *

    + * For example, + * GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns + * information about the local cluster and all remotely configured clusters that + * start with the alias cluster*. Each cluster returns information + * about whether it has any indices, aliases or data streams that match + * my-index-*. + *

    + * Advantages of using this endpoint before a cross-cluster + * search + *

    + * You may want to exclude a cluster or index from a search when: + *

      + *
    • A remote cluster is not currently connected and is configured with + * skip_unavailable=false. Running a cross-cluster search under + * those conditions will cause the entire search to fail.
    • + *
    • A cluster has no matching indices, aliases or data streams for the index + * expression (or your user does not have permissions to search them). For + * example, suppose your index expression is logs*,remote1:logs* + * and the remote1 cluster has no indices, aliases or data streams that match + * logs*. In that case, that cluster will return no results from + * that cluster if you include it in a cross-cluster search.
    • + *
    • The index expression (combined with any query parameters you specify) + * will likely cause an exception to be thrown when you do the search. In these + * cases, the "error" field in the _resolve/cluster + * response will be present. (This is also where security/permission errors will + * be shown.)
    • + *
    • A remote cluster is an older version that does not support the feature + * you want to use in your search.
    • + *
    * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RolloverRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RolloverRequest.java index 6c6fb15d4..b6dd1be0d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RolloverRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RolloverRequest.java @@ -63,8 +63,57 @@ // typedef: indices.rollover.Request /** - * Roll over to a new index. Creates a new index for a data stream or index + * Roll over to a new index. TIP: It is recommended to use the index lifecycle + * rollover action to automate rollovers. + *

    + * The rollover API creates a new index for a data stream or index alias. The + * API behavior depends on the rollover target. + *

    + * Roll over a data stream + *

    + * If you roll over a data stream, the API creates a new write index for the + * stream. The stream's previous write index becomes a regular backing index. A + * rollover also increments the data stream's generation. + *

    + * Roll over an index alias with a write index + *

    + * TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a + * write index to manage time series data. Data streams replace this + * functionality, require less maintenance, and automatically integrate with + * data tiers. + *

    + * If an index alias points to multiple indices, one of the indices must be a + * write index. The rollover API creates a new write index for the alias with + * is_write_index set to true. The API also + * sets is_write_index to false for the previous write + * index. + *

    + * Roll over an index alias with one index + *

    + * If you roll over an index alias that points to only one index, the API + * creates a new index for the alias and removes the original index from the * alias. + *

    + * NOTE: A rollover creates a new index and is subject to the + * wait_for_active_shards setting. + *

    + * Increment index names for an alias + *

    + * When you roll over an index alias, you can specify a name for the new index. + * If you don't specify a name and the current index ends with - + * and a number, such as my-index-000001 or + * my-index-3, the new index name increments that number. For + * example, if you roll over an alias with a current index of + * my-index-000001, the rollover creates a new index named + * my-index-000002. This number is always six characters and + * zero-padded, regardless of the previous index's name. + *

    + * If you use an index alias for time series data, you can use date math in the + * index name to track the rollover date. For example, you can create an alias + * that points to an index named <my-index-{now/d}-000001>. + * If you create the index on May 6, 2099, the index's name is + * my-index-2099.05.06-000001. If you roll over the alias on May 7, + * 2099, the new index's name is my-index-2099.05.07-000002. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateIndexTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateIndexTemplateRequest.java index 289107ed5..0c4171432 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateIndexTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateIndexTemplateRequest.java @@ -57,8 +57,8 @@ // typedef: indices.simulate_index_template.Request /** - * Simulate an index. Returns the index configuration that would be applied to - * the specified index from an existing index template. + * Simulate an index. Get the index configuration that would be applied to the + * specified index from an existing index template. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateTemplateRequest.java index 8f253a4c8..028412fce 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SimulateTemplateRequest.java @@ -63,8 +63,8 @@ // typedef: indices.simulate_template.Request /** - * Simulate an index template. Returns the index configuration that would be - * applied by a particular index template. + * Simulate an index template. 
Get the index configuration that would be applied + * by a particular index template. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java index 0fc0cdcba..d0a960415 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java @@ -79,6 +79,18 @@ * * *

    + * You can make an index read-only with the following request using the add + * index block API: + * + *

    + * PUT /my_source_index/_block/write
    + * 
    + * 
    + *

    + * The current write index on a data stream cannot be split. In order to split + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be split. + *

    * The number of times the index can be split (and the number of shards that * each original shard can be split into) is determined by the * index.number_of_routing_shards setting. The number of routing diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java index 562ae0122..093190a6f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java @@ -65,8 +65,9 @@ * * @see API * specification + * @deprecated 7.14.0 */ - +@Deprecated public class UnfreezeRequest extends RequestBase { @Nullable private final Boolean allowNoIndices; @@ -191,7 +192,7 @@ public final String waitForActiveShards() { /** * Builder for {@link UnfreezeRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { @Nullable private Boolean allowNoIndices; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle_stats/DataStreamStats.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle_stats/DataStreamStats.java new file mode 100644 index 000000000..4257926a1 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle_stats/DataStreamStats.java @@ -0,0 +1,219 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.indices.get_data_lifecycle_stats; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: indices.get_data_lifecycle_stats.DataStreamStats + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DataStreamStats implements JsonpSerializable { + private final int backingIndicesInError; + + private final int backingIndicesInTotal; + + private final String name; + + // --------------------------------------------------------------------------------------------- + + private DataStreamStats(Builder builder) { + + this.backingIndicesInError = ApiTypeHelper.requireNonNull(builder.backingIndicesInError, this, + "backingIndicesInError"); + this.backingIndicesInTotal = ApiTypeHelper.requireNonNull(builder.backingIndicesInTotal, this, + "backingIndicesInTotal"); + this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); + + } + + public static DataStreamStats of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The count of the backing indices for the data stream that have + * encountered an error. + *

    + * API name: {@code backing_indices_in_error} + */ + public final int backingIndicesInError() { + return this.backingIndicesInError; + } + + /** + * Required - The count of the backing indices for the data stream. + *

    + * API name: {@code backing_indices_in_total} + */ + public final int backingIndicesInTotal() { + return this.backingIndicesInTotal; + } + + /** + * Required - The name of the data stream. + *

    + * API name: {@code name} + */ + public final String name() { + return this.name; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("backing_indices_in_error"); + generator.write(this.backingIndicesInError); + + generator.writeKey("backing_indices_in_total"); + generator.write(this.backingIndicesInTotal); + + generator.writeKey("name"); + generator.write(this.name); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DataStreamStats}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private Integer backingIndicesInError; + + private Integer backingIndicesInTotal; + + private String name; + + /** + * Required - The count of the backing indices for the data stream that have + * encountered an error. + *

    + * API name: {@code backing_indices_in_error} + */ + public final Builder backingIndicesInError(int value) { + this.backingIndicesInError = value; + return this; + } + + /** + * Required - The count of the backing indices for the data stream. + *

    + * API name: {@code backing_indices_in_total} + */ + public final Builder backingIndicesInTotal(int value) { + this.backingIndicesInTotal = value; + return this; + } + + /** + * Required - The name of the data stream. + *

    + * API name: {@code name} + */ + public final Builder name(String value) { + this.name = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DataStreamStats}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DataStreamStats build() { + _checkSingleUse(); + + return new DataStreamStats(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DataStreamStats} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + DataStreamStats::setupDataStreamStatsDeserializer); + + protected static void setupDataStreamStatsDeserializer(ObjectDeserializer op) { + + op.add(Builder::backingIndicesInError, JsonpDeserializer.integerDeserializer(), "backing_indices_in_error"); + op.add(Builder::backingIndicesInTotal, JsonpDeserializer.integerDeserializer(), "backing_indices_in_total"); + op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/resolve_cluster/ResolveClusterInfo.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/resolve_cluster/ResolveClusterInfo.java index bf926a5fa..e5bcaf227 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/resolve_cluster/ResolveClusterInfo.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/resolve_cluster/ResolveClusterInfo.java @@ -104,7 +104,7 @@ public final boolean connected() { } /** - * Required - The skip_unavailable setting for a remote cluster. + * Required - The skip_unavailable setting for a remote cluster. *

    * API name: {@code skip_unavailable} */ @@ -125,8 +125,8 @@ public final Boolean matchingIndices() { /** * Provides error messages that are likely to occur if you do a search with this - * index expression on the specified cluster (e.g., lack of security privileges - * to query an index). + * index expression on the specified cluster (for example, lack of security + * privileges to query an index). *

    * API name: {@code error} */ @@ -219,7 +219,7 @@ public final Builder connected(boolean value) { } /** - * Required - The skip_unavailable setting for a remote cluster. + * Required - The skip_unavailable setting for a remote cluster. *

    * API name: {@code skip_unavailable} */ @@ -241,8 +241,8 @@ public final Builder matchingIndices(@Nullable Boolean value) { /** * Provides error messages that are likely to occur if you do a search with this - * index expression on the specified cluster (e.g., lack of security privileges - * to query an index). + * index expression on the specified cluster (for example, lack of security + * privileges to query an index). *

    * API name: {@code error} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java index 64685ed53..12346a688 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java @@ -182,7 +182,25 @@ public final CompletableFuture inference( // ----- Endpoint: inference.put /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @see Documentation @@ -197,7 +215,25 @@ public CompletableFuture put(PutRequest request) { } /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java index 753d3646c..9910b5717 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java @@ -181,7 +181,25 @@ public final InferenceResponse inference(Function"state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @see Documentation @@ -196,7 +214,25 @@ public PutResponse put(PutRequest request) throws IOException, ElasticsearchExce } /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java index 3de1fbd28..6f734f226 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java @@ -59,7 +59,25 @@ // typedef: inference.put.Request /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

    + * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java index 0314ef484..db481b803 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java @@ -60,8 +60,9 @@ * The configuration necessary to identify which IP geolocation provider to use * to download a database, as well as any provider-specific configuration * necessary for such downloading. At present, the only supported providers are - * maxmind and ipinfo, and the maxmind provider requires that an account_id - * (string) is configured. A provider (either maxmind or ipinfo) must be + * maxmind and ipinfo, and the maxmind + * provider requires that an account_id (string) is configured. A + * provider (either maxmind or ipinfo) must be * specified. The web and local providers can be returned as read only * configurations. 
* diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java index 9fbffa205..5622c2bcc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java @@ -58,7 +58,7 @@ // typedef: ingest.delete_ip_location_database.Request /** - * Deletes an IP location database configuration. + * Delete IP geolocation database configurations. * * @see API @@ -90,8 +90,7 @@ public static DeleteIpLocationDatabaseRequest of( } /** - * Required - A comma-separated list of IP location database configurations to - * delete + * Required - A comma-separated list of IP location database configurations. *

    * API name: {@code id} */ @@ -100,8 +99,9 @@ public final List id() { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -111,8 +111,9 @@ public final Time masterTimeout() { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. A value of + * -1 indicates that the request should never time out. *

    * API name: {@code timeout} */ @@ -139,8 +140,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Time timeout; /** - * Required - A comma-separated list of IP location database configurations to - * delete + * Required - A comma-separated list of IP location database configurations. *

    * API name: {@code id} *

    @@ -152,8 +152,7 @@ public final Builder id(List list) { } /** - * Required - A comma-separated list of IP location database configurations to - * delete + * Required - A comma-separated list of IP location database configurations. *

    * API name: {@code id} *

    @@ -165,8 +164,9 @@ public final Builder id(String value, String... values) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -176,8 +176,9 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -186,8 +187,9 @@ public final Builder masterTimeout(Function> f } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. A value of + * -1 indicates that the request should never time out. *

    * API name: {@code timeout} */ @@ -197,8 +199,9 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. A value of + * -1 indicates that the request should never time out. *

    * API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java index bb678fb65..cd0b61db5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java @@ -103,7 +103,7 @@ public final CompletableFuture deleteGeoipDatabase( // ----- Endpoint: ingest.delete_ip_location_database /** - * Deletes an IP location database configuration. + * Delete IP geolocation database configurations. * * @see Documentation @@ -119,7 +119,7 @@ public CompletableFuture deleteIpLocationDatab } /** - * Deletes an IP location database configuration. + * Delete IP geolocation database configurations. * * @param fn * a function that initializes a builder to create the @@ -234,7 +234,7 @@ public CompletableFuture getGeoipDatabase() { // ----- Endpoint: ingest.get_ip_location_database /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @see Documentation @@ -250,7 +250,7 @@ public CompletableFuture getIpLocationDatabase( } /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @param fn * a function that initializes a builder to create the @@ -266,7 +266,7 @@ public final CompletableFuture getIpLocationDatab } /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @see Documentation @@ -348,8 +348,8 @@ public CompletableFuture processorGrok() { // ----- Endpoint: ingest.put_geoip_database /** - * Create or update GeoIP database configurations. Create or update IP - * geolocation database configurations. 
+ * Create or update a GeoIP database configuration. Refer to the create or + * update IP geolocation database configuration API. * * @see Documentation @@ -364,8 +364,8 @@ public CompletableFuture putGeoipDatabase(PutGeoipData } /** - * Create or update GeoIP database configurations. Create or update IP - * geolocation database configurations. + * Create or update a GeoIP database configuration. Refer to the create or + * update IP geolocation database configuration API. * * @param fn * a function that initializes a builder to create the @@ -383,7 +383,7 @@ public final CompletableFuture putGeoipDatabase( // ----- Endpoint: ingest.put_ip_location_database /** - * Returns information about one or more IP location database configurations. + * Create or update an IP geolocation database configuration. * * @see Documentation @@ -399,7 +399,7 @@ public CompletableFuture putIpLocationDatabase( } /** - * Returns information about one or more IP location database configurations. + * Create or update an IP geolocation database configuration. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java index c4d198a4e..ec2c8cbc8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java @@ -105,7 +105,7 @@ public final DeleteGeoipDatabaseResponse deleteGeoipDatabase( // ----- Endpoint: ingest.delete_ip_location_database /** - * Deletes an IP location database configuration. + * Delete IP geolocation database configurations. * * @see Documentation @@ -121,7 +121,7 @@ public DeleteIpLocationDatabaseResponse deleteIpLocationDatabase(DeleteIpLocatio } /** - * Deletes an IP location database configuration. 
+ * Delete IP geolocation database configurations. * * @param fn * a function that initializes a builder to create the @@ -241,7 +241,7 @@ public GetGeoipDatabaseResponse getGeoipDatabase() throws IOException, Elasticse // ----- Endpoint: ingest.get_ip_location_database /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @see Documentation @@ -257,7 +257,7 @@ public GetIpLocationDatabaseResponse getIpLocationDatabase(GetIpLocationDatabase } /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @param fn * a function that initializes a builder to create the @@ -274,7 +274,7 @@ public final GetIpLocationDatabaseResponse getIpLocationDatabase( } /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @see Documentation @@ -357,8 +357,8 @@ public ProcessorGrokResponse processorGrok() throws IOException, ElasticsearchEx // ----- Endpoint: ingest.put_geoip_database /** - * Create or update GeoIP database configurations. Create or update IP - * geolocation database configurations. + * Create or update a GeoIP database configuration. Refer to the create or + * update IP geolocation database configuration API. * * @see Documentation @@ -374,8 +374,8 @@ public PutGeoipDatabaseResponse putGeoipDatabase(PutGeoipDatabaseRequest request } /** - * Create or update GeoIP database configurations. Create or update IP - * geolocation database configurations. + * Create or update a GeoIP database configuration. Refer to the create or + * update IP geolocation database configuration API. 
* * @param fn * a function that initializes a builder to create the @@ -394,7 +394,7 @@ public final PutGeoipDatabaseResponse putGeoipDatabase( // ----- Endpoint: ingest.put_ip_location_database /** - * Returns information about one or more IP location database configurations. + * Create or update an IP geolocation database configuration. * * @see Documentation @@ -410,7 +410,7 @@ public PutIpLocationDatabaseResponse putIpLocationDatabase(PutIpLocationDatabase } /** - * Returns information about one or more IP location database configurations. + * Create or update an IP geolocation database configuration. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java index 570f1b352..db8c746c8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java @@ -58,7 +58,7 @@ // typedef: ingest.get_ip_location_database.Request /** - * Returns information about one or more IP location database configurations. + * Get IP geolocation database configurations. * * @see API @@ -96,8 +96,9 @@ public final List id() { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -150,8 +151,9 @@ public final Builder id(String value, String... values) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -161,8 +163,9 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PipelineConfig.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PipelineConfig.java new file mode 100644 index 000000000..15d0e1c60 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PipelineConfig.java @@ -0,0 +1,262 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Long; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: ingest._types.PipelineConfig + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class PipelineConfig implements JsonpSerializable { + @Nullable + private final String description; + + @Nullable + private final Long version; + + private final List processors; + + // --------------------------------------------------------------------------------------------- + + private PipelineConfig(Builder builder) { + + this.description = builder.description; + this.version = builder.version; + this.processors = ApiTypeHelper.unmodifiableRequired(builder.processors, this, "processors"); + + } + + public static PipelineConfig of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Description of the ingest pipeline. + *

    + * API name: {@code description} + */ + @Nullable + public final String description() { + return this.description; + } + + /** + * Version number used by external systems to track ingest pipelines. + *

    + * API name: {@code version} + */ + @Nullable + public final Long version() { + return this.version; + } + + /** + * Required - Processors used to perform transformations on documents before + * indexing. Processors run sequentially in the order specified. + *

    + * API name: {@code processors} + */ + public final List processors() { + return this.processors; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.description != null) { + generator.writeKey("description"); + generator.write(this.description); + + } + if (this.version != null) { + generator.writeKey("version"); + generator.write(this.version); + + } + if (ApiTypeHelper.isDefined(this.processors)) { + generator.writeKey("processors"); + generator.writeStartArray(); + for (Processor item0 : this.processors) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link PipelineConfig}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private String description; + + @Nullable + private Long version; + + private List processors; + + /** + * Description of the ingest pipeline. + *

    + * API name: {@code description} + */ + public final Builder description(@Nullable String value) { + this.description = value; + return this; + } + + /** + * Version number used by external systems to track ingest pipelines. + *

    + * API name: {@code version} + */ + public final Builder version(@Nullable Long value) { + this.version = value; + return this; + } + + /** + * Required - Processors used to perform transformations on documents before + * indexing. Processors run sequentially in the order specified. + *

    + * API name: {@code processors} + *

    + * Adds all elements of list to processors. + */ + public final Builder processors(List list) { + this.processors = _listAddAll(this.processors, list); + return this; + } + + /** + * Required - Processors used to perform transformations on documents before + * indexing. Processors run sequentially in the order specified. + *

    + * API name: {@code processors} + *

    + * Adds one or more values to processors. + */ + public final Builder processors(Processor value, Processor... values) { + this.processors = _listAdd(this.processors, value, values); + return this; + } + + /** + * Required - Processors used to perform transformations on documents before + * indexing. Processors run sequentially in the order specified. + *

    + * API name: {@code processors} + *

    + * Adds a value to processors using a builder lambda. + */ + public final Builder processors(Function> fn) { + return processors(fn.apply(new Processor.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link PipelineConfig}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public PipelineConfig build() { + _checkSingleUse(); + + return new PipelineConfig(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link PipelineConfig} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + PipelineConfig::setupPipelineConfigDeserializer); + + protected static void setupPipelineConfigDeserializer(ObjectDeserializer op) { + + op.add(Builder::description, JsonpDeserializer.stringDeserializer(), "description"); + op.add(Builder::version, JsonpDeserializer.longDeserializer(), "version"); + op.add(Builder::processors, JsonpDeserializer.arrayDeserializer(Processor._DESERIALIZER), "processors"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutGeoipDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutGeoipDatabaseRequest.java index 30855196d..55fcc0376 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutGeoipDatabaseRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutGeoipDatabaseRequest.java @@ -58,8 +58,8 @@ // typedef: ingest.put_geoip_database.Request /** - * Create or update GeoIP database configurations. Create or update IP - * geolocation database configurations. + * Create or update a GeoIP database configuration. Refer to the create or + * update IP geolocation database configuration API. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java index 141b98cd9..521c35a4e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java @@ -59,7 +59,7 @@ // typedef: ingest.put_ip_location_database.Request /** - * Returns information about one or more IP location database configurations. + * Create or update an IP geolocation database configuration. * * @see API @@ -93,7 +93,7 @@ public static PutIpLocationDatabaseRequest of(Function * API name: {@code id} */ @@ -102,8 +102,9 @@ public final String id() { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -113,8 +114,11 @@ public final Time masterTimeout() { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response from all relevant nodes in the cluster + * after updating the cluster metadata. If no response is received before the + * timeout expires, the cluster metadata update still applies but the response + * indicates that it was not completely acknowledged. A value of -1 + * indicates that the request should never time out. *

    * API name: {@code timeout} */ @@ -158,7 +162,7 @@ public static class Builder extends RequestBase.AbstractBuilder private DatabaseConfiguration configuration; /** - * Required - ID of the database configuration to create or update. + * Required - The database configuration identifier. *

    * API name: {@code id} */ @@ -168,8 +172,9 @@ public final Builder id(String value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -179,8 +184,9 @@ public final Builder masterTimeout(@Nullable Time value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. + * A value of -1 indicates that the request should never time out. *

    * API name: {@code master_timeout} */ @@ -189,8 +195,11 @@ public final Builder masterTimeout(Function> f } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response from all relevant nodes in the cluster + * after updating the cluster metadata. If no response is received before the + * timeout expires, the cluster metadata update still applies but the response + * indicates that it was not completely acknowledged. A value of -1 + * indicates that the request should never time out. *

    * API name: {@code timeout} */ @@ -200,8 +209,11 @@ public final Builder timeout(@Nullable Time value) { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response from all relevant nodes in the cluster + * after updating the cluster metadata. If no response is received before the + * timeout expires, the cluster metadata update still applies but the response + * indicates that it was not completely acknowledged. A value of -1 + * indicates that the request should never time out. *

    * API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java index d1f95c494..de537867f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java @@ -343,7 +343,7 @@ public CompletableFuture postStartBasic() { * To check the status of your trial, use the get trial status API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html">Documentation * on elastic.co */ @@ -370,7 +370,7 @@ public CompletableFuture postStartTrial(PostStartTrialRe * a function that initializes a builder to create the * {@link PostStartTrialRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html">Documentation * on elastic.co */ @@ -392,7 +392,7 @@ public final CompletableFuture postStartTrial( * To check the status of your trial, use the get trial status API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java index f6e8662b6..34b794347 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java @@ -344,7 +344,7 @@ public PostStartBasicResponse postStartBasic() throws IOException, Elasticsearch * To check the status of your trial, use the get trial status API. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html">Documentation * on elastic.co */ @@ -372,7 +372,7 @@ public PostStartTrialResponse postStartTrial(PostStartTrialRequest request) * a function that initializes a builder to create the * {@link PostStartTrialRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html">Documentation * on elastic.co */ @@ -395,7 +395,7 @@ public final PostStartTrialResponse postStartTrial( * To check the status of your trial, use the get trial status API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java index ffa231af7..92d314155 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java @@ -58,7 +58,9 @@ // typedef: logstash.delete_pipeline.Request /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash + * Central Management. If the request succeeds, you receive an empty response + * with an appropriate status code. * * @see API @@ -81,7 +83,7 @@ public static DeletePipelineRequest of(Function * API name: {@code id} */ @@ -101,7 +103,7 @@ public static class Builder extends RequestBase.AbstractBuilder private String id; /** - * Required - Identifier for the pipeline. + * Required - An identifier for the pipeline. *

    * API name: {@code id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java index af70c837a..2657da0e3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java @@ -71,10 +71,12 @@ public ElasticsearchLogstashAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: logstash.delete_pipeline /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash + * Central Management. If the request succeeds, you receive an empty response + * with an appropriate status code. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html">Documentation * on elastic.co */ @@ -86,13 +88,15 @@ public CompletableFuture deletePipeline(DeletePipelineRequest r } /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash + * Central Management. If the request succeeds, you receive an empty response + * with an appropriate status code. * * @param fn * a function that initializes a builder to create the * {@link DeletePipelineRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html">Documentation * on elastic.co */ @@ -104,10 +108,11 @@ public final CompletableFuture deletePipeline( // ----- Endpoint: logstash.get_pipeline /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html">Documentation * on elastic.co */ @@ -119,13 +124,14 @@ public CompletableFuture getPipeline(GetPipelineRequest req } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. * * @param fn * a function that initializes a builder to create the * {@link GetPipelineRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html">Documentation * on elastic.co */ @@ -135,10 +141,11 @@ public final CompletableFuture getPipeline( } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html">Documentation * on elastic.co */ @@ -150,10 +157,13 @@ public CompletableFuture getPipeline() { // ----- Endpoint: logstash.put_pipeline /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

    + * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html">Documentation * on elastic.co */ @@ -165,13 +175,16 @@ public CompletableFuture putPipeline(PutPipelineRequest request } /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

    + * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @param fn * a function that initializes a builder to create the * {@link PutPipelineRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java index d0fa36a78..aa9ab23c8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java @@ -69,10 +69,12 @@ public ElasticsearchLogstashClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: logstash.delete_pipeline /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash + * Central Management. If the request succeeds, you receive an empty response + * with an appropriate status code. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html">Documentation * on elastic.co */ @@ -84,13 +86,15 @@ public BooleanResponse deletePipeline(DeletePipelineRequest request) throws IOEx } /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash + * Central Management. If the request succeeds, you receive an empty response + * with an appropriate status code. 
* * @param fn * a function that initializes a builder to create the * {@link DeletePipelineRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html">Documentation * on elastic.co */ @@ -103,10 +107,11 @@ public final BooleanResponse deletePipeline( // ----- Endpoint: logstash.get_pipeline /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html">Documentation * on elastic.co */ @@ -118,13 +123,14 @@ public GetPipelineResponse getPipeline(GetPipelineRequest request) throws IOExce } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. * * @param fn * a function that initializes a builder to create the * {@link GetPipelineRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html">Documentation * on elastic.co */ @@ -135,10 +141,11 @@ public final GetPipelineResponse getPipeline( } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html">Documentation * on elastic.co */ @@ -150,10 +157,13 @@ public GetPipelineResponse getPipeline() throws IOException, ElasticsearchExcept // ----- Endpoint: logstash.put_pipeline /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

    + * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html">Documentation * on elastic.co */ @@ -165,13 +175,16 @@ public BooleanResponse putPipeline(PutPipelineRequest request) throws IOExceptio } /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

    + * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @param fn * a function that initializes a builder to create the * {@link PutPipelineRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java index bedf522d9..ba98e7915 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java @@ -58,7 +58,8 @@ // typedef: logstash.get_pipeline.Request /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. Get pipelines that are used for Logstash Central + * Management. * * @see API * specification @@ -80,7 +81,7 @@ public static GetPipelineRequest of(Function * API name: {@code id} */ @@ -101,7 +102,7 @@ public static class Builder extends RequestBase.AbstractBuilder private List id; /** - * Comma-separated list of pipeline identifiers. + * A comma-separated list of pipeline identifiers. *

    * API name: {@code id} *

    @@ -113,7 +114,7 @@ public final Builder id(List list) { } /** - * Comma-separated list of pipeline identifiers. + * A comma-separated list of pipeline identifiers. *

    * API name: {@code id} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java index 069e7da6d..3619ab418 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java @@ -64,24 +64,24 @@ public class Pipeline implements JsonpSerializable { private final DateTime lastModified; - private final PipelineMetadata pipelineMetadata; - - private final String username; - private final String pipeline; + private final PipelineMetadata pipelineMetadata; + private final PipelineSettings pipelineSettings; + private final String username; + // --------------------------------------------------------------------------------------------- private Pipeline(Builder builder) { this.description = ApiTypeHelper.requireNonNull(builder.description, this, "description"); this.lastModified = ApiTypeHelper.requireNonNull(builder.lastModified, this, "lastModified"); - this.pipelineMetadata = ApiTypeHelper.requireNonNull(builder.pipelineMetadata, this, "pipelineMetadata"); - this.username = ApiTypeHelper.requireNonNull(builder.username, this, "username"); this.pipeline = ApiTypeHelper.requireNonNull(builder.pipeline, this, "pipeline"); + this.pipelineMetadata = ApiTypeHelper.requireNonNull(builder.pipelineMetadata, this, "pipelineMetadata"); this.pipelineSettings = ApiTypeHelper.requireNonNull(builder.pipelineSettings, this, "pipelineSettings"); + this.username = ApiTypeHelper.requireNonNull(builder.username, this, "username"); } @@ -90,7 +90,7 @@ public static Pipeline of(Function> fn) { } /** - * Required - Description of the pipeline. This description is not used by + * Required - A description of the pipeline. This description is not used by * Elasticsearch or Logstash. *

    * API name: {@code description} @@ -100,7 +100,7 @@ public final String description() { } /** - * Required - Date the pipeline was last updated. Must be in the + * Required - The date the pipeline was last updated. It must be in the * yyyy-MM-dd'T'HH:mm:ss.SSSZZ strict_date_time format. *

    * API name: {@code last_modified} @@ -110,41 +110,41 @@ public final DateTime lastModified() { } /** - * Required - Optional metadata about the pipeline. May have any contents. This - * metadata is not generated or used by Elasticsearch or Logstash. + * Required - The configuration for the pipeline. *

    - * API name: {@code pipeline_metadata} + * API name: {@code pipeline} */ - public final PipelineMetadata pipelineMetadata() { - return this.pipelineMetadata; + public final String pipeline() { + return this.pipeline; } /** - * Required - User who last updated the pipeline. + * Required - Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. *

    - * API name: {@code username} + * API name: {@code pipeline_metadata} */ - public final String username() { - return this.username; + public final PipelineMetadata pipelineMetadata() { + return this.pipelineMetadata; } /** - * Required - Configuration for the pipeline. + * Required - Settings for the pipeline. It supports only flat keys in dot + * notation. *

    - * API name: {@code pipeline} + * API name: {@code pipeline_settings} */ - public final String pipeline() { - return this.pipeline; + public final PipelineSettings pipelineSettings() { + return this.pipelineSettings; } /** - * Required - Settings for the pipeline. Supports only flat keys in dot - * notation. + * Required - The user who last updated the pipeline. *

    - * API name: {@code pipeline_settings} + * API name: {@code username} */ - public final PipelineSettings pipelineSettings() { - return this.pipelineSettings; + public final String username() { + return this.username; } /** @@ -163,18 +163,18 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("last_modified"); this.lastModified.serialize(generator, mapper); - generator.writeKey("pipeline_metadata"); - this.pipelineMetadata.serialize(generator, mapper); - - generator.writeKey("username"); - generator.write(this.username); - generator.writeKey("pipeline"); generator.write(this.pipeline); + generator.writeKey("pipeline_metadata"); + this.pipelineMetadata.serialize(generator, mapper); + generator.writeKey("pipeline_settings"); this.pipelineSettings.serialize(generator, mapper); + generator.writeKey("username"); + generator.write(this.username); + } @Override @@ -193,16 +193,16 @@ public static class Builder extends WithJsonObjectBuilderBase implement private DateTime lastModified; - private PipelineMetadata pipelineMetadata; - - private String username; - private String pipeline; + private PipelineMetadata pipelineMetadata; + private PipelineSettings pipelineSettings; + private String username; + /** - * Required - Description of the pipeline. This description is not used by + * Required - A description of the pipeline. This description is not used by * Elasticsearch or Logstash. *

    * API name: {@code description} @@ -213,7 +213,7 @@ public final Builder description(String value) { } /** - * Required - Date the pipeline was last updated. Must be in the + * Required - The date the pipeline was last updated. It must be in the * yyyy-MM-dd'T'HH:mm:ss.SSSZZ strict_date_time format. *

    * API name: {@code last_modified} @@ -224,48 +224,38 @@ public final Builder lastModified(DateTime value) { } /** - * Required - Optional metadata about the pipeline. May have any contents. This - * metadata is not generated or used by Elasticsearch or Logstash. + * Required - The configuration for the pipeline. *

    - * API name: {@code pipeline_metadata} + * API name: {@code pipeline} */ - public final Builder pipelineMetadata(PipelineMetadata value) { - this.pipelineMetadata = value; + public final Builder pipeline(String value) { + this.pipeline = value; return this; } /** - * Required - Optional metadata about the pipeline. May have any contents. This - * metadata is not generated or used by Elasticsearch or Logstash. + * Required - Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. *

    * API name: {@code pipeline_metadata} */ - public final Builder pipelineMetadata(Function> fn) { - return this.pipelineMetadata(fn.apply(new PipelineMetadata.Builder()).build()); - } - - /** - * Required - User who last updated the pipeline. - *

    - * API name: {@code username} - */ - public final Builder username(String value) { - this.username = value; + public final Builder pipelineMetadata(PipelineMetadata value) { + this.pipelineMetadata = value; return this; } /** - * Required - Configuration for the pipeline. + * Required - Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. *

    - * API name: {@code pipeline} + * API name: {@code pipeline_metadata} */ - public final Builder pipeline(String value) { - this.pipeline = value; - return this; + public final Builder pipelineMetadata(Function> fn) { + return this.pipelineMetadata(fn.apply(new PipelineMetadata.Builder()).build()); } /** - * Required - Settings for the pipeline. Supports only flat keys in dot + * Required - Settings for the pipeline. It supports only flat keys in dot * notation. *

    * API name: {@code pipeline_settings} @@ -276,7 +266,7 @@ public final Builder pipelineSettings(PipelineSettings value) { } /** - * Required - Settings for the pipeline. Supports only flat keys in dot + * Required - Settings for the pipeline. It supports only flat keys in dot * notation. *

    * API name: {@code pipeline_settings} @@ -285,6 +275,16 @@ public final Builder pipelineSettings(Function + * API name: {@code username} + */ + public final Builder username(String value) { + this.username = value; + return this; + } + @Override protected Builder self() { return this; @@ -315,10 +315,10 @@ protected static void setupPipelineDeserializer(ObjectDeserializer + * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @see API * specification @@ -86,7 +89,7 @@ public static PutPipelineRequest of(Function * API name: {@code id} */ @@ -123,7 +126,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Pipeline pipeline; /** - * Required - Identifier for the pipeline. + * Required - An identifier for the pipeline. *

    * API name: {@code id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java index 852165c22..64844c3c2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java @@ -55,9 +55,12 @@ // typedef: migration.deprecations.Request /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsResponse.java index 71d9b4866..4850ae14c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsResponse.java @@ -89,14 +89,20 @@ public static DeprecationsResponse of(Function + * API name: {@code cluster_settings} */ public final List clusterSettings() { return this.clusterSettings; } /** - * Required - API name: {@code index_settings} + * Required - Index warnings are sectioned off per index and can be filtered + * using an index-pattern in the query. This section includes warnings for the + * backing indices of data streams specified in the request path. + *

    + * API name: {@code index_settings} */ public final Map> indexSettings() { return this.indexSettings; @@ -110,14 +116,20 @@ public final Map> dataStreams() { } /** - * Required - API name: {@code node_settings} + * Required - Node-level deprecation warnings. Since only a subset of your nodes + * might incorporate these settings, it is important to read the details section + * for more information about which nodes are affected. + *

    + * API name: {@code node_settings} */ public final List nodeSettings() { return this.nodeSettings; } /** - * Required - API name: {@code ml_settings} + * Required - Machine learning-related deprecation warnings. + *

    + * API name: {@code ml_settings} */ public final List mlSettings() { return this.mlSettings; @@ -228,7 +240,9 @@ public static class Builder extends WithJsonObjectBuilderBase private List mlSettings; /** - * Required - API name: {@code cluster_settings} + * Required - Cluster-level deprecation warnings. + *

    + * API name: {@code cluster_settings} *

    * Adds all elements of list to clusterSettings. */ @@ -238,7 +252,9 @@ public final Builder clusterSettings(List list) { } /** - * Required - API name: {@code cluster_settings} + * Required - Cluster-level deprecation warnings. + *

    + * API name: {@code cluster_settings} *

    * Adds one or more values to clusterSettings. */ @@ -248,7 +264,9 @@ public final Builder clusterSettings(Deprecation value, Deprecation... values) { } /** - * Required - API name: {@code cluster_settings} + * Required - Cluster-level deprecation warnings. + *

    + * API name: {@code cluster_settings} *

    * Adds a value to clusterSettings using a builder lambda. */ @@ -257,7 +275,11 @@ public final Builder clusterSettings(Function + * API name: {@code index_settings} *

    * Adds all entries of map to indexSettings. */ @@ -267,7 +289,11 @@ public final Builder indexSettings(Map> map) { } /** - * Required - API name: {@code index_settings} + * Required - Index warnings are sectioned off per index and can be filtered + * using an index-pattern in the query. This section includes warnings for the + * backing indices of data streams specified in the request path. + *

    + * API name: {@code index_settings} *

    * Adds an entry to indexSettings. */ @@ -297,7 +323,11 @@ public final Builder dataStreams(String key, List value) { } /** - * Required - API name: {@code node_settings} + * Required - Node-level deprecation warnings. Since only a subset of your nodes + * might incorporate these settings, it is important to read the details section + * for more information about which nodes are affected. + *

    + * API name: {@code node_settings} *

    * Adds all elements of list to nodeSettings. */ @@ -307,7 +337,11 @@ public final Builder nodeSettings(List list) { } /** - * Required - API name: {@code node_settings} + * Required - Node-level deprecation warnings. Since only a subset of your nodes + * might incorporate these settings, it is important to read the details section + * for more information about which nodes are affected. + *

    + * API name: {@code node_settings} *

    * Adds one or more values to nodeSettings. */ @@ -317,7 +351,11 @@ public final Builder nodeSettings(Deprecation value, Deprecation... values) { } /** - * Required - API name: {@code node_settings} + * Required - Node-level deprecation warnings. Since only a subset of your nodes + * might incorporate these settings, it is important to read the details section + * for more information about which nodes are affected. + *

    + * API name: {@code node_settings} *

    * Adds a value to nodeSettings using a builder lambda. */ @@ -326,7 +364,9 @@ public final Builder nodeSettings(Function + * API name: {@code ml_settings} *

    * Adds all elements of list to mlSettings. */ @@ -336,7 +376,9 @@ public final Builder mlSettings(List list) { } /** - * Required - API name: {@code ml_settings} + * Required - Machine learning-related deprecation warnings. + *

    + * API name: {@code ml_settings} *

    * Adds one or more values to mlSettings. */ @@ -346,7 +388,9 @@ public final Builder mlSettings(Deprecation value, Deprecation... values) { } /** - * Required - API name: {@code ml_settings} + * Required - Machine learning-related deprecation warnings. + *

    + * API name: {@code ml_settings} *

    * Adds a value to mlSettings using a builder lambda. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java index a897b8d56..fdd14a26c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java @@ -70,12 +70,15 @@ public ElasticsearchMigrationAsyncClient withTransportOptions(@Nullable Transpor // ----- Endpoint: migration.deprecations /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html">Documentation * on elastic.co */ @@ -87,15 +90,18 @@ public CompletableFuture deprecations(DeprecationsRequest } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @param fn * a function that initializes a builder to create the * {@link DeprecationsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html">Documentation * on elastic.co */ @@ -105,12 +111,15 @@ public final CompletableFuture deprecations( } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html">Documentation * on elastic.co */ @@ -122,10 +131,16 @@ public CompletableFuture deprecations() { // ----- Endpoint: migration.get_feature_upgrade_status /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes + * to how features store configuration information and data in system indices. + * Check which features need to be migrated and the status of any migrations + * that are in progress. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html">Documentation * on elastic.co */ public CompletableFuture getFeatureUpgradeStatus() { @@ -136,10 +151,18 @@ public CompletableFuture getFeatureUpgradeStatu // ----- Endpoint: migration.post_feature_upgrade /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to + * how features store configuration information and data in system indices. This + * API starts the automatic migration process. + *

    + * Some functionality might be temporarily unavailable during the migration + * process. + *

    + * TIP: The API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html">Documentation * on elastic.co */ public CompletableFuture postFeatureUpgrade() { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java index 0ec1c3ffd..926a27b99 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java @@ -68,12 +68,15 @@ public ElasticsearchMigrationClient withTransportOptions(@Nullable TransportOpti // ----- Endpoint: migration.deprecations /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html">Documentation * on elastic.co */ @@ -85,15 +88,18 @@ public DeprecationsResponse deprecations(DeprecationsRequest request) throws IOE } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @param fn * a function that initializes a builder to create the * {@link DeprecationsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html">Documentation * on elastic.co */ @@ -104,12 +110,15 @@ public final DeprecationsResponse deprecations( } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html">Documentation * on elastic.co */ @@ -121,10 +130,16 @@ public DeprecationsResponse deprecations() throws IOException, ElasticsearchExce // ----- Endpoint: migration.get_feature_upgrade_status /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes + * to how features store configuration information and data in system indices. + * Check which features need to be migrated and the status of any migrations + * that are in progress. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html">Documentation * on elastic.co */ public GetFeatureUpgradeStatusResponse getFeatureUpgradeStatus() throws IOException, ElasticsearchException { @@ -135,10 +150,18 @@ public GetFeatureUpgradeStatusResponse getFeatureUpgradeStatus() throws IOExcept // ----- Endpoint: migration.post_feature_upgrade /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to + * how features store configuration information and data in system indices. This + * API starts the automatic migration process. + *

    + * Some functionality might be temporarily unavailable during the migration + * process. + *

    + * TIP: The API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html">Documentation * on elastic.co */ public PostFeatureUpgradeResponse postFeatureUpgrade() throws IOException, ElasticsearchException { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java index 27411dc69..17649e75e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java @@ -50,7 +50,13 @@ // typedef: migration.get_feature_upgrade_status.Request /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes + * to how features store configuration information and data in system indices. + * Check which features need to be migrated and the status of any migrations + * that are in progress. + *

    + * TIP: This API is designed for indirect use by the Upgrade Assistant. You are + * strongly recommended to use the Upgrade Assistant. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java index c016b79f8..95e329319 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java @@ -50,7 +50,15 @@ // typedef: migration.post_feature_upgrade.Request /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to + * how features store configuration information and data in system indices. This + * API starts the automatic migration process. + *

    + * Some functionality might be temporarily unavailable during the migration + * process. + *

    + * TIP: The API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/deprecations/Deprecation.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/deprecations/Deprecation.java index 7a4caa705..a3f1b316d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/deprecations/Deprecation.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/deprecations/Deprecation.java @@ -95,6 +95,8 @@ public static Deprecation of(Function> fn) { } /** + * Optional details about the deprecation warning. + *

    * API name: {@code details} */ @Nullable @@ -112,14 +114,19 @@ public final DeprecationLevel level() { } /** - * Required - API name: {@code message} + * Required - Descriptive information about the deprecation warning. + *

    + * API name: {@code message} */ public final String message() { return this.message; } /** - * Required - API name: {@code url} + * Required - A link to the breaking change documentation, where you can find + * more information about this change. + *

    + * API name: {@code url} */ public final String url() { return this.url; @@ -207,6 +214,8 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Map meta; /** + * Optional details about the deprecation warning. + *

    * API name: {@code details} */ public final Builder details(@Nullable String value) { @@ -225,7 +234,9 @@ public final Builder level(DeprecationLevel value) { } /** - * Required - API name: {@code message} + * Required - Descriptive information about the deprecation warning. + *

    + * API name: {@code message} */ public final Builder message(String value) { this.message = value; @@ -233,7 +244,10 @@ public final Builder message(String value) { } /** - * Required - API name: {@code url} + * Required - A link to the breaking change documentation, where you can find + * more information about this change. + *

    + * API name: {@code url} */ public final Builder url(String value) { this.url = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java index 2ed26bfaf..7badf01d5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java @@ -1832,7 +1832,7 @@ public final CompletableFuture inferTrainedModel( // ----- Endpoint: ml.info /** - * Return ML defaults and limits. Returns defaults and limits used by machine + * Get machine learning information. Get defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be @@ -3200,7 +3200,7 @@ public final CompletableFuture upgradeJobSnapshot( // ----- Endpoint: ml.validate /** - * Validates an anomaly detection job. + * Validate an anomaly detection job. * * @see Documentation @@ -3215,7 +3215,7 @@ public CompletableFuture validate(ValidateRequest request) { } /** - * Validates an anomaly detection job. + * Validate an anomaly detection job. * * @param fn * a function that initializes a builder to create the @@ -3231,7 +3231,7 @@ public final CompletableFuture validate( } /** - * Validates an anomaly detection job. + * Validate an anomaly detection job. * * @see Documentation @@ -3246,7 +3246,7 @@ public CompletableFuture validate() { // ----- Endpoint: ml.validate_detector /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @see Documentation @@ -3261,7 +3261,7 @@ public CompletableFuture validateDetector(ValidateDete } /** - * Validates an anomaly detection detector. 
+ * Validate an anomaly detection job. * * @param fn * a function that initializes a builder to create the @@ -3277,7 +3277,7 @@ public final CompletableFuture validateDetector( } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java index 347da76d1..885712ef7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java @@ -1883,7 +1883,7 @@ public final InferTrainedModelResponse inferTrainedModel( // ----- Endpoint: ml.info /** - * Return ML defaults and limits. Returns defaults and limits used by machine + * Get machine learning information. Get defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be @@ -3287,7 +3287,7 @@ public final UpgradeJobSnapshotResponse upgradeJobSnapshot( // ----- Endpoint: ml.validate /** - * Validates an anomaly detection job. + * Validate an anomaly detection job. * * @see Documentation @@ -3302,7 +3302,7 @@ public ValidateResponse validate(ValidateRequest request) throws IOException, El } /** - * Validates an anomaly detection job. + * Validate an anomaly detection job. * * @param fn * a function that initializes a builder to create the @@ -3318,7 +3318,7 @@ public final ValidateResponse validate(FunctionDocumentation @@ -3333,7 +3333,7 @@ public ValidateResponse validate() throws IOException, ElasticsearchException { // ----- Endpoint: ml.validate_detector /** - * Validates an anomaly detection detector. 
+ * Validate an anomaly detection job. * * @see Documentation @@ -3349,7 +3349,7 @@ public ValidateDetectorResponse validateDetector(ValidateDetectorRequest request } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @param fn * a function that initializes a builder to create the @@ -3366,7 +3366,7 @@ public final ValidateDetectorResponse validateDetector( } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/EvaluateDataFrameResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/EvaluateDataFrameResponse.java index 2a15847dc..b398b33b0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/EvaluateDataFrameResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/EvaluateDataFrameResponse.java @@ -84,6 +84,9 @@ public static EvaluateDataFrameResponse of(Function * API name: {@code classification} */ @Nullable @@ -92,6 +95,9 @@ public final DataframeClassificationSummary classification() { } /** + * Evaluation results for an outlier detection analysis. It outputs the + * probability that each document is an outlier. + *

    * API name: {@code outlier_detection} */ @Nullable @@ -100,6 +106,9 @@ public final DataframeOutlierDetectionSummary outlierDetection() { } /** + * Evaluation results for a regression analysis which outputs a prediction of + * values. + *

    * API name: {@code regression} */ @Nullable @@ -160,6 +169,9 @@ public static class Builder extends WithJsonObjectBuilderBase private DataframeRegressionSummary regression; /** + * Evaluation results for a classification analysis. It outputs a prediction + * that identifies to which of the classes each document belongs. + *

    * API name: {@code classification} */ public final Builder classification(@Nullable DataframeClassificationSummary value) { @@ -168,6 +180,9 @@ public final Builder classification(@Nullable DataframeClassificationSummary val } /** + * Evaluation results for a classification analysis. It outputs a prediction + * that identifies to which of the classes each document belongs. + *

    * API name: {@code classification} */ public final Builder classification( @@ -176,6 +191,9 @@ public final Builder classification( } /** + * Evaluation results for an outlier detection analysis. It outputs the + * probability that each document is an outlier. + *

    * API name: {@code outlier_detection} */ public final Builder outlierDetection(@Nullable DataframeOutlierDetectionSummary value) { @@ -184,6 +202,9 @@ public final Builder outlierDetection(@Nullable DataframeOutlierDetectionSummary } /** + * Evaluation results for an outlier detection analysis. It outputs the + * probability that each document is an outlier. + *

    * API name: {@code outlier_detection} */ public final Builder outlierDetection( @@ -192,6 +213,9 @@ public final Builder outlierDetection( } /** + * Evaluation results for a regression analysis which outputs a prediction of + * values. + *

    * API name: {@code regression} */ public final Builder regression(@Nullable DataframeRegressionSummary value) { @@ -200,6 +224,9 @@ public final Builder regression(@Nullable DataframeRegressionSummary value) { } /** + * Evaluation results for a regression analysis which outputs a prediction of + * values. + *

    * API name: {@code regression} */ public final Builder regression( diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java index eb99d7a47..ebc509a2e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java @@ -50,7 +50,7 @@ // typedef: ml.info.Request /** - * Return ML defaults and limits. Returns defaults and limits used by machine + * Get machine learning information. Get defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java index bb3e5f3b5..c7d84909c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java @@ -56,7 +56,7 @@ // typedef: ml.validate_detector.Request /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateRequest.java index 410afa447..f10625488 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateRequest.java @@ -56,7 +56,7 @@ // typedef: ml.validate.Request /** - * Validates an anomaly detection job. 
+ * Validate an anomaly detection job. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java index 768360106..dbb680041 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java @@ -62,7 +62,8 @@ // typedef: monitoring.bulk.Request /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java index aad05d73a..9dc19d082 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java @@ -70,7 +70,8 @@ public ElasticsearchMonitoringAsyncClient withTransportOptions(@Nullable Transpo // ----- Endpoint: monitoring.bulk /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @see Documentation @@ -85,7 +86,8 @@ public CompletableFuture bulk(BulkRequest request) { } /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. 
* * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java index 0c0fb8752..1ad60de63 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java @@ -69,7 +69,8 @@ public ElasticsearchMonitoringClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: monitoring.bulk /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @see Documentation @@ -84,7 +85,8 @@ public BulkResponse bulk(BulkRequest request) throws IOException, ElasticsearchE } /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java index 2f6e7b98f..4f93edd15 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java @@ -87,9 +87,8 @@ public static ClearRepositoriesMeteringArchiveRequest of( } /** - * Required - Specifies the maximum archive_version - * to be cleared from the archive. + * Required - Specifies the maximum archive_version to be cleared + * from the archive. *

    * API name: {@code max_archive_version} */ @@ -99,8 +98,7 @@ public final long maxArchiveVersion() { /** * Required - Comma-separated list of node IDs or names used to limit returned - * information. All the nodes selective options are explained here. + * information. *

    * API name: {@code node_id} */ @@ -122,9 +120,8 @@ public static class Builder extends RequestBase.AbstractBuilder private List nodeId; /** - * Required - Specifies the maximum archive_version - * to be cleared from the archive. + * Required - Specifies the maximum archive_version to be cleared + * from the archive. *

    * API name: {@code max_archive_version} */ @@ -135,8 +132,7 @@ public final Builder maxArchiveVersion(long value) { /** * Required - Comma-separated list of node IDs or names used to limit returned - * information. All the nodes selective options are explained here. + * information. *

    * API name: {@code node_id} *

    @@ -149,8 +145,7 @@ public final Builder nodeId(List list) { /** * Required - Comma-separated list of node IDs or names used to limit returned - * information. All the nodes selective options are explained here. + * information. *

    * API name: {@code node_id} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java index 16b46ba70..9e6fe8d0e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java @@ -72,7 +72,7 @@ public ElasticsearchNodesAsyncClient withTransportOptions(@Nullable TransportOpt * metering information in the cluster. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html">Documentation * on elastic.co */ @@ -92,7 +92,7 @@ public CompletableFuture clearReposito * a function that initializes a builder to create the * {@link ClearRepositoriesMeteringArchiveRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html">Documentation * on elastic.co */ @@ -113,7 +113,7 @@ public final CompletableFuture clearRe * restarts. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html">Documentation * on elastic.co */ @@ -137,7 +137,7 @@ public CompletableFuture getRepositoriesMet * a function that initializes a builder to create the * {@link GetRepositoriesMeteringInfoRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java index 0ad75d8f4..f56133822 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java @@ -72,7 +72,7 @@ public ElasticsearchNodesClient withTransportOptions(@Nullable TransportOptions * metering information in the cluster. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html">Documentation * on elastic.co */ @@ -92,7 +92,7 @@ public ClearRepositoriesMeteringArchiveResponse clearRepositoriesMeteringArchive * a function that initializes a builder to create the * {@link ClearRepositoriesMeteringArchiveRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html">Documentation * on elastic.co */ @@ -114,7 +114,7 @@ public final ClearRepositoriesMeteringArchiveResponse clearRepositoriesMeteringA * restarts. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html">Documentation * on elastic.co */ @@ -138,7 +138,7 @@ public GetRepositoriesMeteringInfoResponse getRepositoriesMeteringInfo(GetReposi * a function that initializes a builder to create the * {@link GetRepositoriesMeteringInfoRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/clear_repositories_metering_archive/ResponseBase.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/clear_repositories_metering_archive/ResponseBase.java index d532440f6..ac1b92c7d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/clear_repositories_metering_archive/ResponseBase.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/clear_repositories_metering_archive/ResponseBase.java @@ -76,9 +76,8 @@ protected ResponseBase(AbstractBuilder builder) { } /** - * Required - Name of the cluster. Based on the Cluster - * name setting. + * Required - Name of the cluster. Based on the cluster.name + * setting. *

    * API name: {@code cluster_name} */ @@ -124,9 +123,8 @@ public abstract static class AbstractBuilder nodes; /** - * Required - Name of the cluster. Based on the Cluster - * name setting. + * Required - Name of the cluster. Based on the cluster.name + * setting. *

    * API name: {@code cluster_name} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/get_repositories_metering_info/ResponseBase.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/get_repositories_metering_info/ResponseBase.java index de9b2870c..9e00775b8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/get_repositories_metering_info/ResponseBase.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/get_repositories_metering_info/ResponseBase.java @@ -76,9 +76,8 @@ protected ResponseBase(AbstractBuilder builder) { } /** - * Required - Name of the cluster. Based on the Cluster - * name setting. + * Required - Name of the cluster. Based on the cluster.name + * setting. *

    * API name: {@code cluster_name} */ @@ -124,9 +123,8 @@ public abstract static class AbstractBuilder nodes; /** - * Required - Name of the cluster. Based on the Cluster - * name setting. + * Required - Name of the cluster. Based on the cluster.name + * setting. *

    * API name: {@code cluster_name} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRuleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRuleRequest.java index 86f361678..7ff412f63 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRuleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRuleRequest.java @@ -56,7 +56,9 @@ // typedef: query_rules.delete_rule.Request /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. Delete a query rule within a query ruleset. This is a + * destructive action that is only recoverable by re-adding the same rule with + * the create or update query rule API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRulesetRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRulesetRequest.java index e94b5fc9f..2b3dd0377 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRulesetRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/DeleteRulesetRequest.java @@ -56,7 +56,8 @@ // typedef: query_rules.delete_ruleset.Request /** - * Delete a query ruleset. + * Delete a query ruleset. Remove a query ruleset and its associated data. This + * is a destructive action that is not recoverable. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesAsyncClient.java index 8035356b4..eb0f89e87 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesAsyncClient.java @@ -70,10 +70,12 @@ public ElasticsearchQueryRulesAsyncClient withTransportOptions(@Nullable Transpo // ----- Endpoint: query_rules.delete_rule /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. Delete a query rule within a query ruleset. This is a + * destructive action that is only recoverable by re-adding the same rule with + * the create or update query rule API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html">Documentation * on elastic.co */ @@ -85,13 +87,15 @@ public CompletableFuture deleteRule(DeleteRuleRequest reques } /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. Delete a query rule within a query ruleset. This is a + * destructive action that is only recoverable by re-adding the same rule with + * the create or update query rule API. * * @param fn * a function that initializes a builder to create the * {@link DeleteRuleRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html">Documentation * on elastic.co */ @@ -103,10 +107,11 @@ public final CompletableFuture deleteRule( // ----- Endpoint: query_rules.delete_ruleset /** - * Delete a query ruleset. + * Delete a query ruleset. Remove a query ruleset and its associated data. This + * is a destructive action that is not recoverable. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html">Documentation * on elastic.co */ @@ -118,13 +123,14 @@ public CompletableFuture deleteRuleset(DeleteRulesetReque } /** - * Delete a query ruleset. + * Delete a query ruleset. Remove a query ruleset and its associated data. This + * is a destructive action that is not recoverable. * * @param fn * a function that initializes a builder to create the * {@link DeleteRulesetRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html">Documentation * on elastic.co */ @@ -139,7 +145,7 @@ public final CompletableFuture deleteRuleset( * Get a query rule. Get details about a query rule within a query ruleset. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-rule.html">Documentation * on elastic.co */ @@ -157,7 +163,7 @@ public CompletableFuture getRule(GetRuleRequest request) { * a function that initializes a builder to create the * {@link GetRuleRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-rule.html">Documentation * on elastic.co */ @@ -172,7 +178,7 @@ public final CompletableFuture getRule( * Get a query ruleset. Get details about a query ruleset. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-ruleset.html">Documentation * on elastic.co */ @@ -190,7 +196,7 @@ public CompletableFuture getRuleset(GetRulesetRequest reques * a function that initializes a builder to create the * {@link GetRulesetRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-ruleset.html">Documentation * on elastic.co */ @@ -205,7 +211,7 @@ public final CompletableFuture getRuleset( * Get all query rulesets. Get summarized information about the query rulesets. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html">Documentation * on elastic.co */ @@ -223,7 +229,7 @@ public CompletableFuture listRulesets(ListRulesetsRequest * a function that initializes a builder to create the * {@link ListRulesetsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html">Documentation * on elastic.co */ @@ -236,7 +242,7 @@ public final CompletableFuture listRulesets( * Get all query rulesets. Get summarized information about the query rulesets. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html">Documentation * on elastic.co */ @@ -250,9 +256,16 @@ public CompletableFuture listRulesets() { /** * Create or update a query rule. Create or update a query rule within a query * ruleset. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only pin + * documents using ids or docs, but cannot use both in a single rule. It is + * advised to use one or the other in query rulesets, to avoid errors. + * Additionally, pinned queries have a maximum limit of 100 pinned hits. If + * multiple matching rules pin more than 100 documents, only the first 100 + * documents are pinned in the order they are specified in the ruleset. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html">Documentation * on elastic.co */ @@ -266,12 +279,19 @@ public CompletableFuture putRule(PutRuleRequest request) { /** * Create or update a query rule. Create or update a query rule within a query * ruleset. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only pin + * documents using ids or docs, but cannot use both in a single rule. It is + * advised to use one or the other in query rulesets, to avoid errors. + * Additionally, pinned queries have a maximum limit of 100 pinned hits. If + * multiple matching rules pin more than 100 documents, only the first 100 + * documents are pinned in the order they are specified in the ruleset. * * @param fn * a function that initializes a builder to create the * {@link PutRuleRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html">Documentation * on elastic.co */ @@ -283,10 +303,19 @@ public final CompletableFuture putRule( // ----- Endpoint: query_rules.put_ruleset /** - * Create or update a query ruleset. + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. + * This limit can be increased by using the + * xpack.applications.rules.max_rules_per_ruleset cluster setting. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only select + * documents using ids or docs, but cannot use both in + * single rule. It is advised to use one or the other in query rulesets, to + * avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned + * hits. If multiple matching rules pin more than 100 documents, only the first + * 100 documents are pinned in the order they are specified in the ruleset. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html">Documentation * on elastic.co */ @@ -298,13 +327,22 @@ public CompletableFuture putRuleset(PutRulesetRequest reques } /** - * Create or update a query ruleset. + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. + * This limit can be increased by using the + * xpack.applications.rules.max_rules_per_ruleset cluster setting. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only select + * documents using ids or docs, but cannot use both in + * single rule. It is advised to use one or the other in query rulesets, to + * avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned + * hits. If multiple matching rules pin more than 100 documents, only the first + * 100 documents are pinned in the order they are specified in the ruleset. * * @param fn * a function that initializes a builder to create the * {@link PutRulesetRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html">Documentation * on elastic.co */ @@ -320,7 +358,7 @@ public final CompletableFuture putRuleset( * identify the rules that would match that criteria. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-query-ruleset.html">Documentation * on elastic.co */ @@ -339,7 +377,7 @@ public CompletableFuture test(TestRequest request) { * a function that initializes a builder to create the * {@link TestRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-query-ruleset.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesClient.java index 701bf9515..11c276eb6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ElasticsearchQueryRulesClient.java @@ -69,10 +69,12 @@ public ElasticsearchQueryRulesClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: query_rules.delete_rule /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. 
Delete a query rule within a query ruleset. This is a + * destructive action that is only recoverable by re-adding the same rule with + * the create or update query rule API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html">Documentation * on elastic.co */ @@ -84,13 +86,15 @@ public DeleteRuleResponse deleteRule(DeleteRuleRequest request) throws IOExcepti } /** - * Delete a query rule. Delete a query rule within a query ruleset. + * Delete a query rule. Delete a query rule within a query ruleset. This is a + * destructive action that is only recoverable by re-adding the same rule with + * the create or update query rule API. * * @param fn * a function that initializes a builder to create the * {@link DeleteRuleRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html">Documentation * on elastic.co */ @@ -102,10 +106,11 @@ public final DeleteRuleResponse deleteRule(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html">Documentation * on elastic.co */ @@ -118,13 +123,14 @@ public DeleteRulesetResponse deleteRuleset(DeleteRulesetRequest request) } /** - * Delete a query ruleset. + * Delete a query ruleset. Remove a query ruleset and its associated data. This + * is a destructive action that is not recoverable. * * @param fn * a function that initializes a builder to create the * {@link DeleteRulesetRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html">Documentation * on elastic.co */ @@ -140,7 +146,7 @@ public final DeleteRulesetResponse deleteRuleset( * Get a query rule. Get details about a query rule within a query ruleset. 
* * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-rule.html">Documentation * on elastic.co */ @@ -158,7 +164,7 @@ public GetRuleResponse getRule(GetRuleRequest request) throws IOException, Elast * a function that initializes a builder to create the * {@link GetRuleRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-rule.html">Documentation * on elastic.co */ @@ -173,7 +179,7 @@ public final GetRuleResponse getRule(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-ruleset.html">Documentation * on elastic.co */ @@ -191,7 +197,7 @@ public GetRulesetResponse getRuleset(GetRulesetRequest request) throws IOExcepti * a function that initializes a builder to create the * {@link GetRulesetRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-ruleset.html">Documentation * on elastic.co */ @@ -206,7 +212,7 @@ public final GetRulesetResponse getRuleset(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html">Documentation * on elastic.co */ @@ -224,7 +230,7 @@ public ListRulesetsResponse listRulesets(ListRulesetsRequest request) throws IOE * a function that initializes a builder to create the * {@link ListRulesetsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html">Documentation * on elastic.co */ @@ -238,7 +244,7 @@ public final ListRulesetsResponse listRulesets( * Get all query rulesets. Get summarized information about the query rulesets. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html">Documentation * on elastic.co */ @@ -252,9 +258,16 @@ public ListRulesetsResponse listRulesets() throws IOException, ElasticsearchExce /** * Create or update a query rule. 
Create or update a query rule within a query * ruleset. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only pin + * documents using ids or docs, but cannot use both in single rule. It is + * advised to use one or the other in query rulesets, to avoid errors. + * Additionally, pinned queries have a maximum limit of 100 pinned hits. If + * multiple matching rules pin more than 100 documents, only the first 100 + * documents are pinned in the order they are specified in the ruleset. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html">Documentation * on elastic.co */ @@ -268,12 +281,19 @@ public PutRuleResponse putRule(PutRuleRequest request) throws IOException, Elast /** * Create or update a query rule. Create or update a query rule within a query * ruleset. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only pin + * documents using ids or docs, but cannot use both in single rule. It is + * advised to use one or the other in query rulesets, to avoid errors. + * Additionally, pinned queries have a maximum limit of 100 pinned hits. If + * multiple matching rules pin more than 100 documents, only the first 100 + * documents are pinned in the order they are specified in the ruleset. * * @param fn * a function that initializes a builder to create the * {@link PutRuleRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html">Documentation * on elastic.co */ @@ -285,10 +305,19 @@ public final PutRuleResponse putRule(Functionxpack.applications.rules.max_rules_per_ruleset cluster setting. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only select + * documents using ids or docs, but cannot use both in + * single rule. It is advised to use one or the other in query rulesets, to + * avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned + * hits. If multiple matching rules pin more than 100 documents, only the first + * 100 documents are pinned in the order they are specified in the ruleset. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html">Documentation * on elastic.co */ @@ -300,13 +329,22 @@ public PutRulesetResponse putRuleset(PutRulesetRequest request) throws IOExcepti } /** - * Create or update a query ruleset. + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. + * This limit can be increased by using the + * xpack.applications.rules.max_rules_per_ruleset cluster setting. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only select + * documents using ids or docs, but cannot use both in + * single rule. It is advised to use one or the other in query rulesets, to + * avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned + * hits. If multiple matching rules pin more than 100 documents, only the first + * 100 documents are pinned in the order they are specified in the ruleset. * * @param fn * a function that initializes a builder to create the * {@link PutRulesetRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html">Documentation * on elastic.co */ @@ -322,7 +360,7 @@ public final PutRulesetResponse putRuleset(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-query-ruleset.html">Documentation * on elastic.co */ @@ -341,7 +379,7 @@ public TestResponse test(TestRequest request) throws IOException, ElasticsearchE * a function that initializes a builder to create the * {@link TestRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-query-ruleset.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ListRulesetsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ListRulesetsRequest.java index ca631743b..6141c518d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ListRulesetsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/ListRulesetsRequest.java @@ -83,7 +83,7 @@ public static ListRulesetsRequest of(Function * API name: {@code from} */ @@ -93,7 +93,7 @@ public final Integer from() { } /** - * specifies a max number of results to get + * The maximum number of results to retrieve. *

    * API name: {@code size} */ @@ -118,7 +118,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Integer size; /** - * Starting offset (default: 0) + * The offset from the first result to fetch. *

    * API name: {@code from} */ @@ -128,7 +128,7 @@ public final Builder from(@Nullable Integer value) { } /** - * specifies a max number of results to get + * The maximum number of results to retrieve. *

    * API name: {@code size} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRuleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRuleRequest.java index 4add60608..44b73344d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRuleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRuleRequest.java @@ -62,6 +62,13 @@ /** * Create or update a query rule. Create or update a query rule within a query * ruleset. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only pin + * documents using ids or docs, but cannot use both in single rule. It is + * advised to use one or the other in query rulesets, to avoid errors. + * Additionally, pinned queries have a maximum limit of 100 pinned hits. If + * multiple matching rules pin more than 100 documents, only the first 100 + * documents are pinned in the order they are specified in the ruleset. * * @see API * specification @@ -99,14 +106,21 @@ public static PutRuleRequest of(Function> } /** - * Required - API name: {@code actions} + * Required - The actions to take when the rule is matched. The format of this + * action depends on the rule type. + *

    + * API name: {@code actions} */ public final QueryRuleActions actions() { return this.actions; } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} */ public final List criteria() { return this.criteria; @@ -122,7 +136,7 @@ public final Integer priority() { /** * Required - The unique identifier of the query rule within the specified - * ruleset to be created or updated + * ruleset to be created or updated. *

    * API name: {@code rule_id} */ @@ -132,7 +146,7 @@ public final String ruleId() { /** * Required - The unique identifier of the query ruleset containing the rule to - * be created or updated + * be created or updated. *

    * API name: {@code ruleset_id} */ @@ -141,7 +155,9 @@ public final String rulesetId() { } /** - * Required - API name: {@code type} + * Required - The type of rule. + *

    + * API name: {@code type} */ public final QueryRuleType type() { return this.type; @@ -202,7 +218,10 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private QueryRuleType type; /** - * Required - API name: {@code actions} + * Required - The actions to take when the rule is matched. The format of this + * action depends on the rule type. + *

    + * API name: {@code actions} */ public final Builder actions(QueryRuleActions value) { this.actions = value; @@ -210,14 +229,21 @@ public final Builder actions(QueryRuleActions value) { } /** - * Required - API name: {@code actions} + * Required - The actions to take when the rule is matched. The format of this + * action depends on the rule type. + *

    + * API name: {@code actions} */ public final Builder actions(Function> fn) { return this.actions(fn.apply(new QueryRuleActions.Builder()).build()); } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} *

    * Adds all elements of list to criteria. */ @@ -227,7 +253,11 @@ public final Builder criteria(List list) { } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} *

    * Adds one or more values to criteria. */ @@ -237,7 +267,11 @@ public final Builder criteria(QueryRuleCriteria value, QueryRuleCriteria... valu } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} *

    * Adds a value to criteria using a builder lambda. */ @@ -255,7 +289,7 @@ public final Builder priority(@Nullable Integer value) { /** * Required - The unique identifier of the query rule within the specified - * ruleset to be created or updated + * ruleset to be created or updated. *

    * API name: {@code rule_id} */ @@ -266,7 +300,7 @@ public final Builder ruleId(String value) { /** * Required - The unique identifier of the query ruleset containing the rule to - * be created or updated + * be created or updated. *

    * API name: {@code ruleset_id} */ @@ -276,7 +310,9 @@ public final Builder rulesetId(String value) { } /** - * Required - API name: {@code type} + * Required - The type of rule. + *

    + * API name: {@code type} */ public final Builder type(QueryRuleType value) { this.type = value; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRulesetRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRulesetRequest.java index 081c61197..799be6428 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRulesetRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/PutRulesetRequest.java @@ -59,7 +59,16 @@ // typedef: query_rules.put_ruleset.Request /** - * Create or update a query ruleset. + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. + * This limit can be increased by using the + * xpack.applications.rules.max_rules_per_ruleset cluster setting. + *

    + * IMPORTANT: Due to limitations within pinned queries, you can only select + * documents using ids or docs, but cannot use both in + * single rule. It is advised to use one or the other in query rulesets, to + * avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned + * hits. If multiple matching rules pin more than 100 documents, only the first + * 100 documents are pinned in the order they are specified in the ruleset. * * @see API * specification @@ -92,7 +101,7 @@ public final List rules() { /** * Required - The unique identifier of the query ruleset to be created or - * updated + * updated. *

    * API name: {@code ruleset_id} */ @@ -168,7 +177,7 @@ public final Builder rules(Function> /** * Required - The unique identifier of the query ruleset to be created or - * updated + * updated. *

    * API name: {@code ruleset_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRule.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRule.java index d37396832..d853b85d5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRule.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRule.java @@ -89,28 +89,41 @@ public static QueryRule queryRuleOf(Function> } /** - * Required - API name: {@code rule_id} + * Required - A unique identifier for the rule. + *

    + * API name: {@code rule_id} */ public final String ruleId() { return this.ruleId; } /** - * Required - API name: {@code type} + * Required - The type of rule. pinned will identify and pin + * specific documents to the top of search results. exclude will + * exclude specific documents from search results. + *

    + * API name: {@code type} */ public final QueryRuleType type() { return this.type; } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} */ public final List criteria() { return this.criteria; } /** - * Required - API name: {@code actions} + * Required - The actions to take when the rule is matched. The format of this + * action depends on the rule type. + *

    + * API name: {@code actions} */ public final QueryRuleActions actions() { return this.actions; @@ -206,7 +219,9 @@ public abstract static class AbstractBuilder + * API name: {@code rule_id} */ public final BuilderT ruleId(String value) { this.ruleId = value; @@ -214,7 +229,11 @@ public final BuilderT ruleId(String value) { } /** - * Required - API name: {@code type} + * Required - The type of rule. pinned will identify and pin + * specific documents to the top of search results. exclude will + * exclude specific documents from search results. + *

    + * API name: {@code type} */ public final BuilderT type(QueryRuleType value) { this.type = value; @@ -222,7 +241,11 @@ public final BuilderT type(QueryRuleType value) { } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} *

    * Adds all elements of list to criteria. */ @@ -232,7 +255,11 @@ public final BuilderT criteria(List list) { } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} *

    * Adds one or more values to criteria. */ @@ -242,7 +269,11 @@ public final BuilderT criteria(QueryRuleCriteria value, QueryRuleCriteria... val } /** - * Required - API name: {@code criteria} + * Required - The criteria that must be met for the rule to be applied. If + * multiple criteria are specified for a rule, all criteria must be met for the + * rule to be applied. + *

    + * API name: {@code criteria} *

    * Adds a value to criteria using a builder lambda. */ @@ -251,7 +282,10 @@ public final BuilderT criteria(Function + * API name: {@code actions} */ public final BuilderT actions(QueryRuleActions value) { this.actions = value; @@ -259,7 +293,10 @@ public final BuilderT actions(QueryRuleActions value) { } /** - * Required - API name: {@code actions} + * Required - The actions to take when the rule is matched. The format of this + * action depends on the rule type. + *

    + * API name: {@code actions} */ public final BuilderT actions(Function> fn) { return this.actions(fn.apply(new QueryRuleActions.Builder()).build()); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleActions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleActions.java index 0cefb5d5d..42bda9b02 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleActions.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleActions.java @@ -80,6 +80,10 @@ public static QueryRuleActions of(Functionids or docs may be specified and at least one must + * be specified. + *

    * API name: {@code ids} */ public final List ids() { @@ -87,6 +91,15 @@ public final List ids() { } /** + * The documents to apply the rule to. Only one of ids or + * docs may be specified and at least one must be specified. There + * is a maximum value of 100 documents in a rule. You can specify the following + * attributes for each document: + *

      + * <ul>
      + * <li><code>_index</code>: The index of the document to pin.</li>
      + * <li><code>_id</code>: The unique document ID.</li>
      + * </ul>

    * API name: {@code docs} */ public final List docs() { @@ -146,6 +159,10 @@ public static class Builder extends WithJsonObjectBuilderBase implement private List docs; /** + * The unique document IDs of the documents to apply the rule to. Only one of + * ids or docs may be specified and at least one must + * be specified. + *

    * API name: {@code ids} *

    * Adds all elements of list to ids. @@ -156,6 +173,10 @@ public final Builder ids(List list) { } /** + * The unique document IDs of the documents to apply the rule to. Only one of + * ids or docs may be specified and at least one must + * be specified. + *

    * API name: {@code ids} *

    * Adds one or more values to ids. @@ -166,6 +187,15 @@ public final Builder ids(String value, String... values) { } /** + * The documents to apply the rule to. Only one of ids or + * docs may be specified and at least one must be specified. There + * is a maximum value of 100 documents in a rule. You can specify the following + * attributes for each document: + *

      + * <ul>
      + * <li><code>_index</code>: The index of the document to pin.</li>
      + * <li><code>_id</code>: The unique document ID.</li>
      + * </ul>

    * API name: {@code docs} *

    * Adds all elements of list to docs. @@ -176,6 +206,15 @@ public final Builder docs(List list) { } /** + * The documents to apply the rule to. Only one of ids or + * docs may be specified and at least one must be specified. There + * is a maximum value of 100 documents in a rule. You can specify the following + * attributes for each document: + *

      + * <ul>
      + * <li><code>_index</code>: The index of the document to pin.</li>
      + * <li><code>_id</code>: The unique document ID.</li>
      + * </ul>

    * API name: {@code docs} *

    * Adds one or more values to docs. @@ -186,6 +225,15 @@ public final Builder docs(PinnedDoc value, PinnedDoc... values) { } /** + * The documents to apply the rule to. Only one of ids or + * docs may be specified and at least one must be specified. There + * is a maximum value of 100 documents in a rule. You can specify the following + * attributes for each document: + *

      + * <ul>
      + * <li><code>_index</code>: The index of the document to pin.</li>
      + * <li><code>_id</code>: The unique document ID.</li>
      + * </ul>

    * API name: {@code docs} *

    * Adds a value to docs using a builder lambda. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleCriteria.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleCriteria.java index fced35f51..136eaa87f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleCriteria.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleCriteria.java @@ -84,13 +84,44 @@ public static QueryRuleCriteria of(Function + *

  + * <ul>
  + * <li><code>always</code>: Matches all queries, regardless of input.</li>
  + * <li><code>contains</code>: Matches that contain this value anywhere in the
  + * field meet the criteria defined by the rule. Only applicable for string
  + * values.</li>
  + * <li><code>exact</code>: Only exact matches meet the criteria defined by the
  + * rule. Applicable for string or numerical values.</li>
  + * <li><code>fuzzy</code>: Exact matches or matches within the allowed
  + * Levenshtein Edit Distance meet the criteria defined by the rule. Only
  + * applicable for string values.</li>
  + * <li><code>gt</code>: Matches with a value greater than this value meet the
  + * criteria defined by the rule. Only applicable for numerical values.</li>
  + * <li><code>gte</code>: Matches with a value greater than or equal to this
  + * value meet the criteria defined by the rule. Only applicable for numerical
  + * values.</li>
  + * <li><code>lt</code>: Matches with a value less than this value meet the
  + * criteria defined by the rule. Only applicable for numerical values.</li>
  + * <li><code>lte</code>: Matches with a value less than or equal to this value
  + * meet the criteria defined by the rule. Only applicable for numerical
  + * values.</li>
  + * <li><code>prefix</code>: Matches that start with this value meet the
  + * criteria defined by the rule. Only applicable for string values.</li>
  + * <li><code>suffix</code>: Matches that end with this value meet the criteria
  + * defined by the rule. Only applicable for string values.</li>
  + * </ul>

    + * API name: {@code type} */ public final QueryRuleCriteriaType type() { return this.type; } /** + * The metadata field to match against. This metadata will be used to match + * against match_criteria sent in the rule. It is required for all + * criteria types except always. + *

    * API name: {@code metadata} */ @Nullable @@ -99,6 +130,10 @@ public final String metadata() { } /** + * The values to match against the metadata field. Only one value + * must match for the criteria to be met. It is required for all criteria types + * except always. + *

    * API name: {@code values} */ public final List values() { @@ -157,7 +192,34 @@ public static class Builder extends WithJsonObjectBuilderBase implement private List values; /** - * Required - API name: {@code type} + * Required - The type of criteria. The following criteria types are supported: + *

      + * <ul>
      + * <li><code>always</code>: Matches all queries, regardless of input.</li>
      + * <li><code>contains</code>: Matches that contain this value anywhere in the
      + * field meet the criteria defined by the rule. Only applicable for string
      + * values.</li>
      + * <li><code>exact</code>: Only exact matches meet the criteria defined by the
      + * rule. Applicable for string or numerical values.</li>
      + * <li><code>fuzzy</code>: Exact matches or matches within the allowed
      + * Levenshtein Edit Distance meet the criteria defined by the rule. Only
      + * applicable for string values.</li>
      + * <li><code>gt</code>: Matches with a value greater than this value meet the
      + * criteria defined by the rule. Only applicable for numerical values.</li>
      + * <li><code>gte</code>: Matches with a value greater than or equal to this
      + * value meet the criteria defined by the rule. Only applicable for numerical
      + * values.</li>
      + * <li><code>lt</code>: Matches with a value less than this value meet the
      + * criteria defined by the rule. Only applicable for numerical values.</li>
      + * <li><code>lte</code>: Matches with a value less than or equal to this value
      + * meet the criteria defined by the rule. Only applicable for numerical
      + * values.</li>
      + * <li><code>prefix</code>: Matches that start with this value meet the
      + * criteria defined by the rule. Only applicable for string values.</li>
      + * <li><code>suffix</code>: Matches that end with this value meet the criteria
      + * defined by the rule. Only applicable for string values.</li>
      + * </ul>

    + * API name: {@code type} */ public final Builder type(QueryRuleCriteriaType value) { this.type = value; @@ -165,6 +227,10 @@ public final Builder type(QueryRuleCriteriaType value) { } /** + * The metadata field to match against. This metadata will be used to match + * against match_criteria sent in the rule. It is required for all + * criteria types except always. + *

    * API name: {@code metadata} */ public final Builder metadata(@Nullable String value) { @@ -173,6 +239,10 @@ public final Builder metadata(@Nullable String value) { } /** + * The values to match against the metadata field. Only one value + * must match for the criteria to be met. It is required for all criteria types + * except always. + *

    * API name: {@code values} *

    * Adds all elements of list to values. @@ -183,6 +253,10 @@ public final Builder values(List list) { } /** + * The values to match against the metadata field. Only one value + * must match for the criteria to be met. It is required for all criteria types + * except always. + *

    * API name: {@code values} *

    * Adds one or more values to values. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleset.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleset.java index f0df77249..7c4af2e6d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleset.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/QueryRuleset.java @@ -74,7 +74,7 @@ protected QueryRuleset(AbstractBuilder builder) { } /** - * Required - Query Ruleset unique identifier + * Required - A unique identifier for the ruleset. *

    * API name: {@code ruleset_id} */ @@ -83,7 +83,7 @@ public final String rulesetId() { } /** - * Required - Rules associated with the query ruleset + * Required - Rules associated with the query ruleset. *

    * API name: {@code rules} */ @@ -131,7 +131,7 @@ public abstract static class AbstractBuilder rules; /** - * Required - Query Ruleset unique identifier + * Required - A unique identifier for the ruleset. *

    * API name: {@code ruleset_id} */ @@ -141,7 +141,7 @@ public final BuilderT rulesetId(String value) { } /** - * Required - Rules associated with the query ruleset + * Required - Rules associated with the query ruleset. *

    * API name: {@code rules} *

    @@ -153,7 +153,7 @@ public final BuilderT rules(List list) { } /** - * Required - Rules associated with the query ruleset + * Required - Rules associated with the query ruleset. *

    * API name: {@code rules} *

    @@ -165,7 +165,7 @@ public final BuilderT rules(QueryRule value, QueryRule... values) { } /** - * Required - Rules associated with the query ruleset + * Required - Rules associated with the query ruleset. *

    * API name: {@code rules} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/TestRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/TestRequest.java index aa53b4113..4e7b47e68 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/TestRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/TestRequest.java @@ -85,7 +85,11 @@ public static TestRequest of(Function> fn) { } /** - * Required - API name: {@code match_criteria} + * Required - The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the + * criteria.metadata field of the rule. + *

    + * API name: {@code match_criteria} */ public final Map matchCriteria() { return this.matchCriteria; @@ -138,7 +142,11 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private String rulesetId; /** - * Required - API name: {@code match_criteria} + * Required - The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the + * criteria.metadata field of the rule. + *

    + * API name: {@code match_criteria} *

    * Adds all entries of map to matchCriteria. */ @@ -148,7 +156,11 @@ public final Builder matchCriteria(Map map) { } /** - * Required - API name: {@code match_criteria} + * Required - The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the + * criteria.metadata field of the rule. + *

    + * API name: {@code match_criteria} *

    * Adds an entry to matchCriteria. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/list_rulesets/QueryRulesetListItem.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/list_rulesets/QueryRulesetListItem.java index 12a44fa53..136c01ac3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/list_rulesets/QueryRulesetListItem.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/query_rules/list_rulesets/QueryRulesetListItem.java @@ -87,7 +87,7 @@ public static QueryRulesetListItem of(Function * API name: {@code ruleset_id} */ @@ -96,7 +96,7 @@ public final String rulesetId() { } /** - * Required - The number of rules associated with this ruleset + * Required - The number of rules associated with the ruleset. *

    * API name: {@code rule_total_count} */ @@ -105,8 +105,12 @@ public final int ruleTotalCount() { } /** - * Required - A map of criteria type (e.g. exact) to the number of rules of that - * type + * Required - A map of criteria type (for example, exact) to the + * number of rules of that type. + *

    + * NOTE: The counts in rule_criteria_types_counts may be larger + * than the value of rule_total_count because a rule may have + * multiple criteria. *

    * API name: {@code rule_criteria_types_counts} */ @@ -115,8 +119,8 @@ public final Map ruleCriteriaTypesCounts() { } /** - * Required - A map of rule type (e.g. pinned) to the number of rules of that - * type + * Required - A map of rule type (for example, pinned) to the + * number of rules of that type. *

    * API name: {@code rule_type_counts} */ @@ -189,7 +193,7 @@ public static class Builder extends WithJsonObjectBuilderBase private Map ruleTypeCounts; /** - * Required - Ruleset unique identifier + * Required - A unique identifier for the ruleset. *

    * API name: {@code ruleset_id} */ @@ -199,7 +203,7 @@ public final Builder rulesetId(String value) { } /** - * Required - The number of rules associated with this ruleset + * Required - The number of rules associated with the ruleset. *

    * API name: {@code rule_total_count} */ @@ -209,8 +213,12 @@ public final Builder ruleTotalCount(int value) { } /** - * Required - A map of criteria type (e.g. exact) to the number of rules of that - * type + * Required - A map of criteria type (for example, exact) to the + * number of rules of that type. + *

    + * NOTE: The counts in rule_criteria_types_counts may be larger + * than the value of rule_total_count because a rule may have + * multiple criteria. *

    * API name: {@code rule_criteria_types_counts} *

    @@ -222,8 +230,12 @@ public final Builder ruleCriteriaTypesCounts(Map map) { } /** - * Required - A map of criteria type (e.g. exact) to the number of rules of that - * type + * Required - A map of criteria type (for example, exact) to the + * number of rules of that type. + *

    + * NOTE: The counts in rule_criteria_types_counts may be larger + * than the value of rule_total_count because a rule may have + * multiple criteria. *

    * API name: {@code rule_criteria_types_counts} *

    @@ -235,8 +247,8 @@ public final Builder ruleCriteriaTypesCounts(String key, Integer value) { } /** - * Required - A map of rule type (e.g. pinned) to the number of rules of that - * type + * Required - A map of rule type (for example, pinned) to the + * number of rules of that type. *

    * API name: {@code rule_type_counts} *

    @@ -248,8 +260,8 @@ public final Builder ruleTypeCounts(Map map) { } /** - * Required - A map of rule type (e.g. pinned) to the number of rules of that - * type + * Required - A map of rule type (for example, pinned) to the + * number of rules of that type. *

    * API name: {@code rule_type_counts} *

    diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java index 8abae242c..b3b67282d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java @@ -56,12 +56,41 @@ // typedef: rollup.delete_job.Request /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

    + * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

    + * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

    + * POST my_rollup_index/_delete_by_query
    + * {
    + *   "query": {
    + *     "term": {
    + *       "_rollup.id": "the_rollup_job_id"
    + *     }
    + *   }
    + * }
    + * 
    + * 
    + * * @see API * specification + * @deprecated 8.11.0 */ - +@Deprecated public class DeleteJobRequest extends RequestBase { private final String id; @@ -91,7 +120,7 @@ public final String id() { /** * Builder for {@link DeleteJobRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java index f564b55ba..acd53b940 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java @@ -70,10 +70,38 @@ public ElasticsearchRollupAsyncClient withTransportOptions(@Nullable TransportOp // ----- Endpoint: rollup.delete_job /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

    + * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

    + * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

    +	 * POST my_rollup_index/_delete_by_query
    +	 * {
    +	 *   "query": {
    +	 *     "term": {
    +	 *       "_rollup.id": "the_rollup_job_id"
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html">Documentation * on elastic.co */ @@ -85,13 +113,41 @@ public CompletableFuture deleteJob(DeleteJobRequest request) } /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

    + * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

    + * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

    +	 * POST my_rollup_index/_delete_by_query
    +	 * {
    +	 *   "query": {
    +	 *     "term": {
    +	 *       "_rollup.id": "the_rollup_job_id"
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + * * @param fn * a function that initializes a builder to create the * {@link DeleteJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html">Documentation * on elastic.co */ @@ -103,10 +159,16 @@ public final CompletableFuture deleteJob( // ----- Endpoint: rollup.get_jobs /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

    + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html">Documentation * on elastic.co */ @@ -118,13 +180,19 @@ public CompletableFuture getJobs(GetJobsRequest request) { } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

    + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @param fn * a function that initializes a builder to create the * {@link GetJobsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html">Documentation * on elastic.co */ @@ -134,10 +202,16 @@ public final CompletableFuture getJobs( } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

    + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html">Documentation * on elastic.co */ @@ -149,11 +223,23 @@ public CompletableFuture getJobs() { // ----- Endpoint: rollup.get_rollup_caps /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html">Documentation * on elastic.co */ @@ -165,14 +251,26 @@ public CompletableFuture getRollupCaps(GetRollupCapsReque } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
    + * * @param fn * a function that initializes a builder to create the * {@link GetRollupCapsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html">Documentation * on elastic.co */ @@ -182,11 +280,23 @@ public final CompletableFuture getRollupCaps( } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html">Documentation * on elastic.co */ @@ -198,11 +308,19 @@ public CompletableFuture getRollupCaps() { // ----- Endpoint: rollup.get_rollup_index_caps /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html">Documentation * on elastic.co */ @@ -214,14 +332,22 @@ public CompletableFuture getRollupIndexCaps(GetRollu } /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
    + * * @param fn * a function that initializes a builder to create the * {@link GetRollupIndexCapsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html">Documentation * on elastic.co */ @@ -233,10 +359,26 @@ public final CompletableFuture getRollupIndexCaps( // ----- Endpoint: rollup.put_job /** - * Creates a rollup job. + * Create a rollup job. + *

    + * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

    + * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

    + * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

    + * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html">Documentation * on elastic.co */ @@ -248,13 +390,29 @@ public CompletableFuture putJob(PutJobRequest request) { } /** - * Creates a rollup job. + * Create a rollup job. + *

    + * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

    + * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

    + * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

    + * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @param fn * a function that initializes a builder to create the * {@link PutJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html">Documentation * on elastic.co */ @@ -266,10 +424,55 @@ public final CompletableFuture putJob( // ----- Endpoint: rollup.rollup_search /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

+ * <ul>
+ * <li>The original request is sent to the non-rollup index unaltered.</li>
+ * <li>A rewritten version of the original request is sent to the rollup
+ * index.</li>
+ * </ul>
+ * <p>

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -285,13 +488,58 @@ public CompletableFuture> rollupSear } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

+ * <ul>
+ * <li>The original request is sent to the non-rollup index unaltered.</li>
+ * <li>A rewritten version of the original request is sent to the rollup
+ * index.</li>
+ * </ul>
+ * <p>

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @param fn * a function that initializes a builder to create the * {@link RollupSearchRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -302,10 +550,55 @@ public final CompletableFuture> roll } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

+ * <ul>
+ * <li>The original request is sent to the non-rollup index unaltered.</li>
+ * <li>A rewritten version of the original request is sent to the rollup
+ * index.</li>
+ * </ul>
+ * <p>

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -321,13 +614,58 @@ public CompletableFuture> rollupSear } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

+ * <ul>
+ * <li>The original request is sent to the non-rollup index unaltered.</li>
+ * <li>A rewritten version of the original request is sent to the rollup
+ * index.</li>
+ * </ul>
+ * <p>

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @param fn * a function that initializes a builder to create the * {@link RollupSearchRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -339,10 +677,12 @@ public final CompletableFuture> roll // ----- Endpoint: rollup.start_job /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html">Documentation * on elastic.co */ @@ -354,13 +694,15 @@ public CompletableFuture startJob(StartJobRequest request) { } /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @param fn * a function that initializes a builder to create the * {@link StartJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html">Documentation * on elastic.co */ @@ -372,10 +714,25 @@ public final CompletableFuture startJob( // ----- Endpoint: rollup.stop_job /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. + *

    + * Since only a stopped job can be deleted, it can be useful to block the API + * until the indexer has fully stopped. This is accomplished with the + * wait_for_completion query parameter, and optionally a timeout. + * For example: + * + *

    +	 * POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
    +	 * 
    +	 * 
    + *

    + * The parameter blocks the API call from returning until either the job has + * moved to STOPPED or the specified time has elapsed. If the specified time + * elapses without the job moving to STOPPED, a timeout exception occurs. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html">Documentation * on elastic.co */ @@ -387,13 +744,28 @@ public CompletableFuture stopJob(StopJobRequest request) { } /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. + *

    + * Since only a stopped job can be deleted, it can be useful to block the API + * until the indexer has fully stopped. This is accomplished with the + * wait_for_completion query parameter, and optionally a timeout. + * For example: + * + *

    +	 * POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
    +	 * 
    +	 * 
    + *

    + * The parameter blocks the API call from returning until either the job has + * moved to STOPPED or the specified time has elapsed. If the specified time + * elapses without the job moving to STOPPED, a timeout exception occurs. * * @param fn * a function that initializes a builder to create the * {@link StopJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java index f8b4584cc..c0b8d808b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java @@ -70,10 +70,38 @@ public ElasticsearchRollupClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: rollup.delete_job /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

    + * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

    + * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

    +	 * POST my_rollup_index/_delete_by_query
    +	 * {
    +	 *   "query": {
    +	 *     "term": {
    +	 *       "_rollup.id": "the_rollup_job_id"
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html">Documentation * on elastic.co */ @@ -85,13 +113,41 @@ public DeleteJobResponse deleteJob(DeleteJobRequest request) throws IOException, } /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

    + * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

    + * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

    +	 * POST my_rollup_index/_delete_by_query
    +	 * {
    +	 *   "query": {
    +	 *     "term": {
    +	 *       "_rollup.id": "the_rollup_job_id"
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + * * @param fn * a function that initializes a builder to create the * {@link DeleteJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html">Documentation * on elastic.co */ @@ -103,10 +159,16 @@ public final DeleteJobResponse deleteJob(Function + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html">Documentation * on elastic.co */ @@ -118,13 +180,19 @@ public GetJobsResponse getJobs(GetJobsRequest request) throws IOException, Elast } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

    + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @param fn * a function that initializes a builder to create the * {@link GetJobsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html">Documentation * on elastic.co */ @@ -134,10 +202,16 @@ public final GetJobsResponse getJobs(Function + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html">Documentation * on elastic.co */ @@ -149,11 +223,23 @@ public GetJobsResponse getJobs() throws IOException, ElasticsearchException { // ----- Endpoint: rollup.get_rollup_caps /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

      + *
    1. Does this index have associated rollup data somewhere in the + * cluster?
    2. + *
    3. If yes to the first question, what fields were rolled up, what + * aggregations can be performed, and where does the data live?
    4. + *
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html">Documentation * on elastic.co */ @@ -166,14 +252,26 @@ public GetRollupCapsResponse getRollupCaps(GetRollupCapsRequest request) } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

      + *
    1. Does this index have associated rollup data somewhere in the + * cluster?
    2. + *
    3. If yes to the first question, what fields were rolled up, what + * aggregations can be performed, and where does the data live?
    4. + *
    + * * @param fn * a function that initializes a builder to create the * {@link GetRollupCapsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html">Documentation * on elastic.co */ @@ -184,11 +282,23 @@ public final GetRollupCapsResponse getRollupCaps( } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

      + *
    1. Does this index have associated rollup data somewhere in the + * cluster?
    2. + *
    3. If yes to the first question, what fields were rolled up, what + * aggregations can be performed, and where does the data live?
    4. + *
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html">Documentation * on elastic.co */ @@ -200,11 +310,19 @@ public GetRollupCapsResponse getRollupCaps() throws IOException, ElasticsearchEx // ----- Endpoint: rollup.get_rollup_index_caps /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
      + *
    • What jobs are stored in an index (or indices specified via a + * pattern)?
    • + *
    • What target indices were rolled up, what fields were used in those + * rollups, and what aggregations can be performed on each job?
    • + *
    + * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html">Documentation * on elastic.co */ @@ -217,14 +335,22 @@ public GetRollupIndexCapsResponse getRollupIndexCaps(GetRollupIndexCapsRequest r } /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
      + *
    • What jobs are stored in an index (or indices specified via a + * pattern)?
    • + *
    • What target indices were rolled up, what fields were used in those + * rollups, and what aggregations can be performed on each job?
    • + *
    + * * @param fn * a function that initializes a builder to create the * {@link GetRollupIndexCapsRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html">Documentation * on elastic.co */ @@ -237,10 +363,26 @@ public final GetRollupIndexCapsResponse getRollupIndexCaps( // ----- Endpoint: rollup.put_job /** - * Creates a rollup job. + * Create a rollup job. + *

    + * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

    + * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

    + * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

    + * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html">Documentation * on elastic.co */ @@ -252,13 +394,29 @@ public PutJobResponse putJob(PutJobRequest request) throws IOException, Elastics } /** - * Creates a rollup job. + * Create a rollup job. + *

    + * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

    + * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

    + * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

    + * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @param fn * a function that initializes a builder to create the * {@link PutJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html">Documentation * on elastic.co */ @@ -270,10 +428,55 @@ public final PutJobResponse putJob(Function + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

      + *
    • The original request is sent to the non-rollup index unaltered.
    • + *
    • A rewritten version of the original request is sent to the rollup + * index.
    • + *
    + *

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -289,13 +492,58 @@ public RollupSearchResponse rollupSearch(RollupSearchRequ } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

      + *
    • The original request is sent to the non-rollup index unaltered.
    • + *
    • A rewritten version of the original request is sent to the rollup + * index.
    • + *
    + *

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @param fn * a function that initializes a builder to create the * {@link RollupSearchRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -306,10 +554,55 @@ public final RollupSearchResponse rollupSearch( } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

      + *
    • The original request is sent to the non-rollup index unaltered.
    • + *
    • A rewritten version of the original request is sent to the rollup + * index.
    • + *
    + *

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -325,13 +618,58 @@ public RollupSearchResponse rollupSearch(RollupSearchRequ } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    +	 * GET sensor-1,sensor_rollup/_rollup_search
    +	 * {
    +	 *   "size": 0,
    +	 *   "aggregations": {
    +	 *      "max_temperature": {
    +	 *       "max": {
    +	 *         "field": "temperature"
    +	 *       }
    +	 *     }
    +	 *   }
    +	 * }
    +	 * 
    +	 * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

      + *
    • The original request is sent to the non-rollup index unaltered.
    • + *
    • A rewritten version of the original request is sent to the rollup + * index.
    • + *
    + *

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @param fn * a function that initializes a builder to create the * {@link RollupSearchRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html">Documentation * on elastic.co */ @@ -344,10 +682,12 @@ public final RollupSearchResponse rollupSearch( // ----- Endpoint: rollup.start_job /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html">Documentation * on elastic.co */ @@ -359,13 +699,15 @@ public StartJobResponse startJob(StartJobRequest request) throws IOException, El } /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @param fn * a function that initializes a builder to create the * {@link StartJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html">Documentation * on elastic.co */ @@ -377,10 +719,25 @@ public final StartJobResponse startJob(Function + * Since only a stopped job can be deleted, it can be useful to block the API + * until the indexer has fully stopped. This is accomplished with the + * wait_for_completion query parameter, and optionally a timeout. + * For example: + * + *

    +	 * POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
    +	 * 
    +	 * 
    + *

    + * The parameter blocks the API call from returning until either the job has + * moved to STOPPED or the specified time has elapsed. If the specified time + * elapses without the job moving to STOPPED, a timeout exception occurs. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html">Documentation * on elastic.co */ @@ -392,13 +749,28 @@ public StopJobResponse stopJob(StopJobRequest request) throws IOException, Elast } /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. + *

    + * Since only a stopped job can be deleted, it can be useful to block the API + * until the indexer has fully stopped. This is accomplished with the + * wait_for_completion query parameter, and optionally a timeout. + * For example: + * + *

    +	 * POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
    +	 * 
    +	 * 
    + *

    + * The parameter blocks the API call from returning until either the job has + * moved to STOPPED or the specified time has elapsed. If the specified time + * elapses without the job moving to STOPPED, a timeout exception occurs. * * @param fn * a function that initializes a builder to create the * {@link StopJobRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java index 29f9fe583..6769df348 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java @@ -55,12 +55,19 @@ // typedef: rollup.get_jobs.Request /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

    + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see API * specification + * @deprecated 8.11.0 */ - +@Deprecated public class GetJobsRequest extends RequestBase { @Nullable private final String id; @@ -93,7 +100,7 @@ public final String id() { /** * Builder for {@link GetJobsRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { @Nullable private String id; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java index 7f1a32ac5..137de339f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java @@ -55,13 +55,26 @@ // typedef: rollup.get_rollup_caps.Request /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

    + * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

      + *
    1. Does this index have associated rollup data somewhere in the + * cluster?
    2. + *
    3. If yes to the first question, what fields were rolled up, what + * aggregations can be performed, and where does the data live?
    4. + *
    + * * @see API * specification + * @deprecated 8.11.0 */ - +@Deprecated public class GetRollupCapsRequest extends RequestBase { @Nullable private final String id; @@ -94,7 +107,7 @@ public final String id() { /** * Builder for {@link GetRollupCapsRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java index d4f711672..a7f0ce3bc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java @@ -58,14 +58,23 @@ // typedef: rollup.get_rollup_index_caps.Request /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
      + *
    • What jobs are stored in an index (or indices specified via a + * pattern)?
    • + *
    • What target indices were rolled up, what fields were used in those + * rollups, and what aggregations can be performed on each job?
    • + *
    + * * @see API * specification + * @deprecated 8.11.0 */ - +@Deprecated public class GetRollupIndexCapsRequest extends RequestBase { private final List index; @@ -96,7 +105,7 @@ public final List index() { /** * Builder for {@link GetRollupIndexCapsRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java index f556d0ce9..11d95fc6a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java @@ -61,11 +61,29 @@ // typedef: rollup.put_job.Request /** - * Creates a rollup job. + * Create a rollup job. + *

    + * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

    + * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

    + * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

    + * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @see API * specification + * @deprecated 8.11.0 */ +@Deprecated @JsonpDeserializable public class PutJobRequest extends RequestBase implements JsonpSerializable { private final String cron; @@ -284,7 +302,7 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { /** * Builder for {@link PutJobRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { private String cron; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java index 0eab9af1c..35689a982 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java @@ -63,11 +63,58 @@ // typedef: rollup.rollup_search.Request /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. + *

    + * The request body supports a subset of features from the regular search API. + * The following functionality is not available: + *

    + * size: Because rollups work on pre-aggregated data, no search + * hits can be returned and so size must be set to zero or omitted entirely. + * highlighter, suggestors, post_filter, + * profile, explain: These are similarly disallowed. + *

    + * Searching both historical rollup and non-rollup data + *

    + * The rollup search API has the capability to search across both + * "live" non-rollup data and the aggregated rollup data. This is done + * by simply adding the live indices to the URI. For example: + * + *

    + * GET sensor-1,sensor_rollup/_rollup_search
    + * {
    + *   "size": 0,
    + *   "aggregations": {
    + *      "max_temperature": {
    + *       "max": {
    + *         "field": "temperature"
    + *       }
    + *     }
    + *   }
    + * }
    + * 
    + * 
    + *

    + * The rollup search endpoint does two things when the search runs: + *

      + *
    • The original request is sent to the non-rollup index unaltered.
    • + *
    • A rewritten version of the original request is sent to the rollup + * index.
    • + *
    + *

    + * When the two responses are received, the endpoint rewrites the rollup + * response and merges the two together. During the merging process, if there is + * any overlap in buckets between the two responses, the buckets from the + * non-rollup index are used. * * @see API * specification + * @deprecated 8.11.0 */ +@Deprecated @JsonpDeserializable public class RollupSearchRequest extends RequestBase implements JsonpSerializable { private final Map aggregations; @@ -105,7 +152,20 @@ public final Map aggregations() { } /** - * Required - Enables searching rolled-up data using the standard Query DSL. + * Required - A comma-separated list of data streams and indices used to limit + * the request. This parameter has the following rules: + *

+ * <ul>
+ * <li>At least one data stream, index, or wildcard expression must be
+ * specified. This target can include a rollup or non-rollup index. For data
+ * streams, the stream's backing indices can only serve as non-rollup indices.
+ * Omitting the parameter or using <code>_all</code> are not permitted.</li>
+ * <li>Multiple non-rollup indices may be specified.</li>
+ * <li>Only one rollup index may be specified. If more than one are supplied, an
+ * exception occurs.</li>
+ * <li>Wildcard expressions (<code>*</code>) may be used. If they match more
+ * than one rollup index, an exception occurs. However, you can use an
+ * expression to match multiple non-rollup indices or data streams.</li>
+ * </ul>
+ * <p>

    * API name: {@code index} */ @@ -114,7 +174,7 @@ public final List index() { } /** - * Specifies a DSL query. + * Specifies a DSL query that is subject to some limitations. *

    * API name: {@code query} */ @@ -173,7 +233,7 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { /** * Builder for {@link RollupSearchRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { @@ -224,7 +284,20 @@ public final Builder aggregations(String key, Function + *

+ * <ul>
+ * <li>At least one data stream, index, or wildcard expression must be
+ * specified. This target can include a rollup or non-rollup index. For data
+ * streams, the stream's backing indices can only serve as non-rollup indices.
+ * Omitting the parameter or using <code>_all</code> are not permitted.</li>
+ * <li>Multiple non-rollup indices may be specified.</li>
+ * <li>Only one rollup index may be specified. If more than one are supplied, an
+ * exception occurs.</li>
+ * <li>Wildcard expressions (<code>*</code>) may be used. If they match more
+ * than one rollup index, an exception occurs. However, you can use an
+ * expression to match multiple non-rollup indices or data streams.</li>
+ * </ul>
+ * <p>

    * API name: {@code index} *

    @@ -236,7 +309,20 @@ public final Builder index(List list) { } /** - * Required - Enables searching rolled-up data using the standard Query DSL. + * Required - A comma-separated list of data streams and indices used to limit + * the request. This parameter has the following rules: + *

+ * <ul>
+ * <li>At least one data stream, index, or wildcard expression must be
+ * specified. This target can include a rollup or non-rollup index. For data
+ * streams, the stream's backing indices can only serve as non-rollup indices.
+ * Omitting the parameter or using <code>_all</code> are not permitted.</li>
+ * <li>Multiple non-rollup indices may be specified.</li>
+ * <li>Only one rollup index may be specified. If more than one are supplied, an
+ * exception occurs.</li>
+ * <li>Wildcard expressions (<code>*</code>) may be used. If they match more
+ * than one rollup index, an exception occurs. However, you can use an
+ * expression to match multiple non-rollup indices or data streams.</li>
+ * </ul>
+ * <p>

    * API name: {@code index} *

    @@ -248,7 +334,7 @@ public final Builder index(String value, String... values) { } /** - * Specifies a DSL query. + * Specifies a DSL query that is subject to some limitations. *

    * API name: {@code query} */ @@ -258,7 +344,7 @@ public final Builder query(@Nullable Query value) { } /** - * Specifies a DSL query. + * Specifies a DSL query that is subject to some limitations. *

    * API name: {@code query} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java index dc63e9189..ed29d956b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java @@ -56,12 +56,15 @@ // typedef: rollup.start_job.Request /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @see API * specification + * @deprecated 8.11.0 */ - +@Deprecated public class StartJobRequest extends RequestBase { private final String id; @@ -91,7 +94,7 @@ public final String id() { /** * Builder for {@link StartJobRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { private String id; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java index 08eb7d861..0d94dee96 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java @@ -57,12 +57,28 @@ // typedef: rollup.stop_job.Request /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. + *

    + * Since only a stopped job can be deleted, it can be useful to block the API + * until the indexer has fully stopped. This is accomplished with the + * wait_for_completion query parameter, and optionally a timeout. + * For example: + * + *

+ * <pre>
+ * <code>POST _rollup/job/sensor/_stop?wait_for_completion=true&amp;timeout=10s
+ * </code>
+ * </pre>
+ * <p>

    + * The parameter blocks the API call from returning until either the job has + * moved to STOPPED or the specified time has elapsed. If the specified time + * elapses without the job moving to STOPPED, a timeout exception occurs. * * @see API * specification + * @deprecated 8.11.0 */ - +@Deprecated public class StopJobRequest extends RequestBase { private final String id; @@ -99,7 +115,9 @@ public final String id() { * If wait_for_completion is true, the API blocks for * (at maximum) the specified duration while waiting for the job to stop. If * more than timeout time has passed, the API throws a timeout - * exception. + * exception. NOTE: Even if a timeout occurs, the stop request is still + * processing and eventually moves the job to STOPPED. The timeout simply means + * the API call itself timed out while waiting for the status change. *

    * API name: {@code timeout} */ @@ -125,7 +143,7 @@ public final Boolean waitForCompletion() { /** * Builder for {@link StopJobRequest}. */ - + @Deprecated public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { private String id; @@ -149,7 +167,9 @@ public final Builder id(String value) { * If wait_for_completion is true, the API blocks for * (at maximum) the specified duration while waiting for the job to stop. If * more than timeout time has passed, the API throws a timeout - * exception. + * exception. NOTE: Even if a timeout occurs, the stop request is still + * processing and eventually moves the job to STOPPED. The timeout simply means + * the API call itself timed out while waiting for the status change. *

    * API name: {@code timeout} */ @@ -162,7 +182,9 @@ public final Builder timeout(@Nullable Time value) { * If wait_for_completion is true, the API blocks for * (at maximum) the specified duration while waiting for the job to stop. If * more than timeout time has passed, the API throws a timeout - * exception. + * exception. NOTE: Even if a timeout occurs, the stop request is still + * processing and eventually moves the job to STOPPED. The timeout simply means + * the API call itself timed out while waiting for the status change. *

    * API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_jobs/RollupJob.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_jobs/RollupJob.java index 85cf1b639..479781916 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_jobs/RollupJob.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_jobs/RollupJob.java @@ -79,21 +79,30 @@ public static RollupJob of(Function> fn) { } /** - * Required - API name: {@code config} + * Required - The rollup job configuration. + *

    + * API name: {@code config} */ public final RollupJobConfiguration config() { return this.config; } /** - * Required - API name: {@code stats} + * Required - Transient statistics about the rollup job, such as how many + * documents have been processed and how many rollup summary docs have been + * indexed. These stats are not persisted. If a node is restarted, these stats + * are reset. + *

    + * API name: {@code stats} */ public final RollupJobStats stats() { return this.stats; } /** - * Required - API name: {@code status} + * Required - The current status of the indexer for the rollup job. + *

    + * API name: {@code status} */ public final RollupJobStatus status() { return this.status; @@ -140,7 +149,9 @@ public static class Builder extends WithJsonObjectBuilderBase implement private RollupJobStatus status; /** - * Required - API name: {@code config} + * Required - The rollup job configuration. + *

    + * API name: {@code config} */ public final Builder config(RollupJobConfiguration value) { this.config = value; @@ -148,7 +159,9 @@ public final Builder config(RollupJobConfiguration value) { } /** - * Required - API name: {@code config} + * Required - The rollup job configuration. + *

    + * API name: {@code config} */ public final Builder config( Function> fn) { @@ -156,7 +169,12 @@ public final Builder config( } /** - * Required - API name: {@code stats} + * Required - Transient statistics about the rollup job, such as how many + * documents have been processed and how many rollup summary docs have been + * indexed. These stats are not persisted. If a node is restarted, these stats + * are reset. + *

    + * API name: {@code stats} */ public final Builder stats(RollupJobStats value) { this.stats = value; @@ -164,14 +182,21 @@ public final Builder stats(RollupJobStats value) { } /** - * Required - API name: {@code stats} + * Required - Transient statistics about the rollup job, such as how many + * documents have been processed and how many rollup summary docs have been + * indexed. These stats are not persisted. If a node is restarted, these stats + * are reset. + *

    + * API name: {@code stats} */ public final Builder stats(Function> fn) { return this.stats(fn.apply(new RollupJobStats.Builder()).build()); } /** - * Required - API name: {@code status} + * Required - The current status of the indexer for the rollup job. + *

    + * API name: {@code status} */ public final Builder status(RollupJobStatus value) { this.status = value; @@ -179,7 +204,9 @@ public final Builder status(RollupJobStatus value) { } /** - * Required - API name: {@code status} + * Required - The current status of the indexer for the rollup job. + *

    + * API name: {@code status} */ public final Builder status(Function> fn) { return this.status(fn.apply(new RollupJobStatus.Builder()).build()); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_rollup_caps/RollupCapabilities.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_rollup_caps/RollupCapabilities.java index ac7797edd..ab0772502 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_rollup_caps/RollupCapabilities.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/get_rollup_caps/RollupCapabilities.java @@ -75,7 +75,11 @@ public static RollupCapabilities of(Function + * API name: {@code rollup_jobs} */ public final List rollupJobs() { return this.rollupJobs; @@ -122,7 +126,11 @@ public static class Builder extends WithJsonObjectBuilderBase private List rollupJobs; /** - * Required - API name: {@code rollup_jobs} + * Required - There can be multiple, independent jobs configured for a single + * index or index pattern. Each of these jobs may have different configurations, + * so the API returns a list of all the various configurations available. + *

    + * API name: {@code rollup_jobs} *

    * Adds all elements of list to rollupJobs. */ @@ -132,7 +140,11 @@ public final Builder rollupJobs(List list) { } /** - * Required - API name: {@code rollup_jobs} + * Required - There can be multiple, independent jobs configured for a single + * index or index pattern. Each of these jobs may have different configurations, + * so the API returns a list of all the various configurations available. + *

    + * API name: {@code rollup_jobs} *

    * Adds one or more values to rollupJobs. */ @@ -142,7 +154,11 @@ public final Builder rollupJobs(RollupCapabilitySummary value, RollupCapabilityS } /** - * Required - API name: {@code rollup_jobs} + * Required - There can be multiple, independent jobs configured for a single + * index or index pattern. Each of these jobs may have different configurations, + * so the API returns a list of all the various configurations available. + *

    + * API name: {@code rollup_jobs} *

    * Adds a value to rollupJobs using a builder lambda. */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationAsyncClient.java index 26fb3a549..7dd26dcde 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationAsyncClient.java @@ -223,7 +223,7 @@ public CompletableFuture getBehavioralAnalytics( // ----- Endpoint: search_application.list /** - * Returns the existing search applications. + * Get search applications. Get information about search applications. * * @see Documentation @@ -238,7 +238,7 @@ public CompletableFuture list(ListRequest request) { } /** - * Returns the existing search applications. + * Get search applications. Get information about search applications. * * @param fn * a function that initializes a builder to create the @@ -253,7 +253,7 @@ public final CompletableFuture list(FunctionDocumentation @@ -265,6 +265,40 @@ public CompletableFuture list() { this.transportOptions); } + // ----- Endpoint: search_application.post_behavioral_analytics_event + + /** + * Create a behavioral analytics collection event. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture postBehavioralAnalyticsEvent( + PostBehavioralAnalyticsEventRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PostBehavioralAnalyticsEventRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Create a behavioral analytics collection event. 
+ * + * @param fn + * a function that initializes a builder to create the + * {@link PostBehavioralAnalyticsEventRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture postBehavioralAnalyticsEvent( + Function> fn) { + return postBehavioralAnalyticsEvent(fn.apply(new PostBehavioralAnalyticsEventRequest.Builder()).build()); + } + // ----- Endpoint: search_application.put /** @@ -331,6 +365,57 @@ public final CompletableFuture putBehavioralAnal return putBehavioralAnalytics(fn.apply(new PutBehavioralAnalyticsRequest.Builder()).build()); } + // ----- Endpoint: search_application.render_query + + /** + * Render a search application query. Generate an Elasticsearch query using the + * specified query parameters and the search template associated with the search + * application or a default template if none is specified. If a parameter used + * in the search template is not specified in params, the + * parameter's default value will be used. The API returns the specific + * Elasticsearch query that would be generated and run by calling the search + * application search API. + *

    + * You must have read privileges on the backing alias of the search + * application. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture renderQuery(RenderQueryRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) RenderQueryRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Render a search application query. Generate an Elasticsearch query using the + * specified query parameters and the search template associated with the search + * application or a default template if none is specified. If a parameter used + * in the search template is not specified in params, the + * parameter's default value will be used. The API returns the specific + * Elasticsearch query that would be generated and run by calling the search + * application search API. + *

    + * You must have read privileges on the backing alias of the search + * application. + * + * @param fn + * a function that initializes a builder to create the + * {@link RenderQueryRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture renderQuery( + Function> fn) { + return renderQuery(fn.apply(new RenderQueryRequest.Builder()).build()); + } + // ----- Endpoint: search_application.search /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationClient.java index 9f2713b8e..7d58ca6f7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ElasticsearchSearchApplicationClient.java @@ -230,7 +230,7 @@ public GetBehavioralAnalyticsResponse getBehavioralAnalytics() throws IOExceptio // ----- Endpoint: search_application.list /** - * Returns the existing search applications. + * Get search applications. Get information about search applications. * * @see Documentation @@ -245,7 +245,7 @@ public ListResponse list(ListRequest request) throws IOException, ElasticsearchE } /** - * Returns the existing search applications. + * Get search applications. Get information about search applications. * * @param fn * a function that initializes a builder to create the @@ -261,7 +261,7 @@ public final ListResponse list(FunctionDocumentation @@ -273,6 +273,41 @@ public ListResponse list() throws IOException, ElasticsearchException { this.transportOptions); } + // ----- Endpoint: search_application.post_behavioral_analytics_event + + /** + * Create a behavioral analytics collection event. 
+ * + * @see Documentation + * on elastic.co + */ + + public PostBehavioralAnalyticsEventResponse postBehavioralAnalyticsEvent( + PostBehavioralAnalyticsEventRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PostBehavioralAnalyticsEventRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Create a behavioral analytics collection event. + * + * @param fn + * a function that initializes a builder to create the + * {@link PostBehavioralAnalyticsEventRequest} + * @see Documentation + * on elastic.co + */ + + public final PostBehavioralAnalyticsEventResponse postBehavioralAnalyticsEvent( + Function> fn) + throws IOException, ElasticsearchException { + return postBehavioralAnalyticsEvent(fn.apply(new PostBehavioralAnalyticsEventRequest.Builder()).build()); + } + // ----- Endpoint: search_application.put /** @@ -341,6 +376,58 @@ public final PutBehavioralAnalyticsResponse putBehavioralAnalytics( return putBehavioralAnalytics(fn.apply(new PutBehavioralAnalyticsRequest.Builder()).build()); } + // ----- Endpoint: search_application.render_query + + /** + * Render a search application query. Generate an Elasticsearch query using the + * specified query parameters and the search template associated with the search + * application or a default template if none is specified. If a parameter used + * in the search template is not specified in params, the + * parameter's default value will be used. The API returns the specific + * Elasticsearch query that would be generated and run by calling the search + * application search API. + *

    + * You must have read privileges on the backing alias of the search + * application. + * + * @see Documentation + * on elastic.co + */ + + public RenderQueryResponse renderQuery(RenderQueryRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) RenderQueryRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Render a search application query. Generate an Elasticsearch query using the + * specified query parameters and the search template associated with the search + * application or a default template if none is specified. If a parameter used + * in the search template is not specified in params, the + * parameter's default value will be used. The API returns the specific + * Elasticsearch query that would be generated and run by calling the search + * application search API. + *

    + * You must have read privileges on the backing alias of the search + * application. + * + * @param fn + * a function that initializes a builder to create the + * {@link RenderQueryRequest} + * @see Documentation + * on elastic.co + */ + + public final RenderQueryResponse renderQuery( + Function> fn) + throws IOException, ElasticsearchException { + return renderQuery(fn.apply(new RenderQueryRequest.Builder()).build()); + } + // ----- Endpoint: search_application.search /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/EventType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/EventType.java new file mode 100644 index 000000000..42ac3ffb0 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/EventType.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.search_application; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. 
MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum EventType implements JsonEnum { + PageView("page_view"), + + Search("search"), + + SearchClick("search_click"), + + ; + + private final String jsonValue; + + EventType(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + EventType.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListRequest.java index 4ed4c98e7..66e68f1a1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListRequest.java @@ -56,7 +56,7 @@ // typedef: search_application.list.Request /** - * Returns the existing search applications. + * Get search applications. Get information about search applications. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PostBehavioralAnalyticsEventRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PostBehavioralAnalyticsEventRequest.java new file mode 100644 index 000000000..fe665b671 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PostBehavioralAnalyticsEventRequest.java @@ -0,0 +1,296 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.search_application; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import jakarta.json.stream.JsonParser; +import java.lang.Boolean; +import java.lang.String; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: search_application.post_behavioral_analytics_event.Request + +/** + * Create a behavioral analytics collection event. 
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class PostBehavioralAnalyticsEventRequest extends RequestBase implements JsonpSerializable { + private final String collectionName; + + @Nullable + private final Boolean debug; + + private final EventType eventType; + + private final JsonData payload; + + // --------------------------------------------------------------------------------------------- + + private PostBehavioralAnalyticsEventRequest(Builder builder) { + + this.collectionName = ApiTypeHelper.requireNonNull(builder.collectionName, this, "collectionName"); + this.debug = builder.debug; + this.eventType = ApiTypeHelper.requireNonNull(builder.eventType, this, "eventType"); + this.payload = ApiTypeHelper.requireNonNull(builder.payload, this, "payload"); + + } + + public static PostBehavioralAnalyticsEventRequest of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The name of the behavioral analytics collection. + *

    + * API name: {@code collection_name} + */ + public final String collectionName() { + return this.collectionName; + } + + /** + * Whether the response type has to include more details + *

    + * API name: {@code debug} + */ + @Nullable + public final Boolean debug() { + return this.debug; + } + + /** + * Required - The analytics event type. + *

    + * API name: {@code event_type} + */ + public final EventType eventType() { + return this.eventType; + } + + /** + * Required - Request body. + */ + public final JsonData payload() { + return this.payload; + } + + /** + * Serialize this value to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + this.payload.serialize(generator, mapper); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link PostBehavioralAnalyticsEventRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String collectionName; + + @Nullable + private Boolean debug; + + private EventType eventType; + + private JsonData payload; + + /** + * Required - The name of the behavioral analytics collection. + *

    + * API name: {@code collection_name} + */ + public final Builder collectionName(String value) { + this.collectionName = value; + return this; + } + + /** + * Whether the response type has to include more details + *

    + * API name: {@code debug} + */ + public final Builder debug(@Nullable Boolean value) { + this.debug = value; + return this; + } + + /** + * Required - The analytics event type. + *

    + * API name: {@code event_type} + */ + public final Builder eventType(EventType value) { + this.eventType = value; + return this; + } + + /** + * Required - Request body. + */ + public final Builder payload(JsonData value) { + this.payload = value; + return this; + } + + @Override + public Builder withJson(JsonParser parser, JsonpMapper mapper) { + + @SuppressWarnings("unchecked") + JsonData value = (JsonData) JsonData._DESERIALIZER.deserialize(parser, mapper); + return this.payload(value); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link PostBehavioralAnalyticsEventRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public PostBehavioralAnalyticsEventRequest build() { + _checkSingleUse(); + + return new PostBehavioralAnalyticsEventRequest(this); + } + } + + public static final JsonpDeserializer _DESERIALIZER = createPostBehavioralAnalyticsEventRequestDeserializer(); + protected static JsonpDeserializer createPostBehavioralAnalyticsEventRequestDeserializer() { + + JsonpDeserializer valueDeserializer = JsonData._DESERIALIZER; + + return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() + .payload(valueDeserializer.deserialize(parser, mapper, event)).build()); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code search_application.post_behavioral_analytics_event}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/search_application.post_behavioral_analytics_event", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + final int _eventType = 1 << 0; + final int _collectionName = 1 << 1; + + int propsSet = 0; + + propsSet |= _eventType; + propsSet |= _collectionName; + + if (propsSet == (_collectionName | _eventType)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_application"); + buf.append("/analytics"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.collectionName, buf); + buf.append("/event"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.eventType.jsonValue(), buf); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _eventType = 1 << 0; + final int _collectionName = 1 << 1; + + int propsSet = 0; + + propsSet |= _eventType; + propsSet |= _collectionName; + + if (propsSet == (_collectionName | _eventType)) { + params.put("collectionName", request.collectionName); + params.put("eventType", request.eventType.jsonValue()); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.debug != null) { + params.put("debug", String.valueOf(request.debug)); + } + return params; + + }, SimpleEndpoint.emptyMap(), true, PostBehavioralAnalyticsEventResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PostBehavioralAnalyticsEventResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PostBehavioralAnalyticsEventResponse.java new file mode 100644 index 000000000..76a070d5a --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PostBehavioralAnalyticsEventResponse.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch B.V. 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.search_application; + +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: search_application.post_behavioral_analytics_event.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class PostBehavioralAnalyticsEventResponse implements JsonpSerializable { + private final boolean accepted; + + @Nullable + private final JsonData event; + + // --------------------------------------------------------------------------------------------- + + private PostBehavioralAnalyticsEventResponse(Builder builder) { + + this.accepted = ApiTypeHelper.requireNonNull(builder.accepted, this, "accepted"); + this.event = builder.event; + + } + + public static PostBehavioralAnalyticsEventResponse of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code accepted} + */ + public final boolean accepted() { + return this.accepted; + } + + /** + * API name: {@code event} + */ + @Nullable + public final JsonData event() { + return this.event; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("accepted"); + generator.write(this.accepted); + + if (this.event != null) { + generator.writeKey("event"); + this.event.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link PostBehavioralAnalyticsEventResponse}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private Boolean accepted; + + @Nullable + private JsonData event; + + /** + * Required - API name: {@code accepted} + */ + public final Builder accepted(boolean value) { + this.accepted = value; + return this; + } + + /** + * API name: {@code event} + */ + public final Builder event(@Nullable JsonData value) { + this.event = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link PostBehavioralAnalyticsEventResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public PostBehavioralAnalyticsEventResponse build() { + _checkSingleUse(); + + return new PostBehavioralAnalyticsEventResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link PostBehavioralAnalyticsEventResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, + PostBehavioralAnalyticsEventResponse::setupPostBehavioralAnalyticsEventResponseDeserializer); + + protected static void setupPostBehavioralAnalyticsEventResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::accepted, JsonpDeserializer.booleanDeserializer(), "accepted"); + op.add(Builder::event, JsonData._DESERIALIZER, "event"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/RenderQueryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/RenderQueryRequest.java new file mode 100644 index 000000000..8a0ec8779 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/RenderQueryRequest.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.search_application; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: search_application.render_query.Request + +/** + * Render a search application query. Generate an Elasticsearch query using the + * specified query parameters and the search template associated with the search + * application or a default template if none is specified. If a parameter used + * in the search template is not specified in params, the + * parameter's default value will be used. The API returns the specific + * Elasticsearch query that would be generated and run by calling the search + * application search API. + *

    + * You must have read privileges on the backing alias of the search + * application. + * + * @see API + * specification + */ +@JsonpDeserializable +public class RenderQueryRequest extends RequestBase implements JsonpSerializable { + private final String name; + + private final Map params; + + // --------------------------------------------------------------------------------------------- + + private RenderQueryRequest(Builder builder) { + + this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); + this.params = ApiTypeHelper.unmodifiable(builder.params); + + } + + public static RenderQueryRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The name of the search application to render the query for. + *

    + * API name: {@code name} + */ + public final String name() { + return this.name; + } + + /** + * API name: {@code params} + */ + public final Map params() { + return this.params; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (ApiTypeHelper.isDefined(this.params)) { + generator.writeKey("params"); + generator.writeStartObject(); + for (Map.Entry item0 : this.params.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link RenderQueryRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String name; + + @Nullable + private Map params; + + /** + * Required - The name of the search application to render the query for. + *

    + * API name: {@code name} + */ + public final Builder name(String value) { + this.name = value; + return this; + } + + /** + * API name: {@code params} + *

    + * Adds all entries of map to params. + */ + public final Builder params(Map map) { + this.params = _mapPutAll(this.params, map); + return this; + } + + /** + * API name: {@code params} + *

    + * Adds an entry to params. + */ + public final Builder params(String key, JsonData value) { + this.params = _mapPut(this.params, key, value); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link RenderQueryRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public RenderQueryRequest build() { + _checkSingleUse(); + + return new RenderQueryRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link RenderQueryRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, RenderQueryRequest::setupRenderQueryRequestDeserializer); + + protected static void setupRenderQueryRequestDeserializer(ObjectDeserializer op) { + + op.add(Builder::params, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "params"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code search_application.render_query}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/search_application.render_query", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_application"); + buf.append("/search_application"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.name, buf); + buf.append("/_render_query"); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _name = 1 << 0; + + int propsSet = 0; + + propsSet |= _name; + + if (propsSet == (_name)) { + params.put("name", request.name); + } + return params; + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, RenderQueryResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/RenderQueryResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/RenderQueryResponse.java new file mode 100644 index 000000000..42b822813 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/RenderQueryResponse.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.search_application; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: search_application.render_query.Response + +/** + * + * @see API + * specification + */ + +public class RenderQueryResponse { + public RenderQueryResponse() { + } + + /** + * Singleton instance for {@link RenderQueryResponse}. 
+ */ + public static final RenderQueryResponse _INSTANCE = new RenderQueryResponse(); + + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer + .emptyObject(RenderQueryResponse._INSTANCE); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java index c807e9642..0d8d16b7c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java @@ -58,7 +58,8 @@ // typedef: searchable_snapshots.cache_stats.Request /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java index 771e420e7..d5362ffb4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java @@ -59,7 +59,8 @@ // typedef: searchable_snapshots.clear_cache.Request /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java index 175f5f222..338f1d307 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java @@ -71,7 +71,8 @@ public ElasticsearchSearchableSnapshotsAsyncClient withTransportOptions( // ----- Endpoint: searchable_snapshots.cache_stats /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see Documentation @@ -86,7 +87,8 @@ public CompletableFuture cacheStats(CacheStatsRequest reques } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @param fn * a function that initializes a builder to create the @@ -102,7 +104,8 @@ public final CompletableFuture cacheStats( } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see Documentation @@ -117,7 +120,8 @@ public CompletableFuture cacheStats() { // ----- Endpoint: searchable_snapshots.clear_cache /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see Documentation @@ -132,7 +136,8 @@ public CompletableFuture clearCache(ClearCacheRequest reques } /** - * Clear the cache of searchable snapshots. + * Clear the cache. 
Clear indices and data streams from the shared cache for + * partially mounted indices. * * @param fn * a function that initializes a builder to create the @@ -148,7 +153,8 @@ public final CompletableFuture clearCache( } /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see Documentation @@ -163,7 +169,9 @@ public CompletableFuture clearCache() { // ----- Endpoint: searchable_snapshots.mount /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @see Documentation @@ -178,7 +186,9 @@ public CompletableFuture mount(MountRequest request) { } /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @param fn * a function that initializes a builder to create the @@ -196,7 +206,7 @@ public final CompletableFuture mount( // ----- Endpoint: searchable_snapshots.stats /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @see Documentation @@ -211,7 +221,7 @@ public CompletableFuture stats(SearchableSnaps } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @param fn * a function that initializes a builder to create the @@ -227,7 +237,7 @@ public final CompletableFuture stats( } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. 
* * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java index 3280a28f0..6f57e841e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java @@ -71,7 +71,8 @@ public ElasticsearchSearchableSnapshotsClient withTransportOptions(@Nullable Tra // ----- Endpoint: searchable_snapshots.cache_stats /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see Documentation @@ -86,7 +87,8 @@ public CacheStatsResponse cacheStats(CacheStatsRequest request) throws IOExcepti } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @param fn * a function that initializes a builder to create the @@ -102,7 +104,8 @@ public final CacheStatsResponse cacheStats(FunctionDocumentation @@ -117,7 +120,8 @@ public CacheStatsResponse cacheStats() throws IOException, ElasticsearchExceptio // ----- Endpoint: searchable_snapshots.clear_cache /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see Documentation @@ -132,7 +136,8 @@ public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOExcepti } /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. 
* * @param fn * a function that initializes a builder to create the @@ -148,7 +153,8 @@ public final ClearCacheResponse clearCache(FunctionDocumentation @@ -163,7 +169,9 @@ public ClearCacheResponse clearCache() throws IOException, ElasticsearchExceptio // ----- Endpoint: searchable_snapshots.mount /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @see Documentation @@ -178,7 +186,9 @@ public MountResponse mount(MountRequest request) throws IOException, Elasticsear } /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @param fn * a function that initializes a builder to create the @@ -196,7 +206,7 @@ public final MountResponse mount(FunctionDocumentation @@ -212,7 +222,7 @@ public SearchableSnapshotsStatsResponse stats(SearchableSnapshotsStatsRequest re } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @param fn * a function that initializes a builder to create the @@ -229,7 +239,7 @@ public final SearchableSnapshotsStatsResponse stats( } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. 
* * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java index aa2964a80..1cd7ce354 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java @@ -61,7 +61,9 @@ // typedef: searchable_snapshots.mount.Request /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java index 33630a60d..7b37b9e90 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java @@ -57,7 +57,7 @@ // typedef: searchable_snapshots.stats.Request /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/BulkUpdateApiKeysRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/BulkUpdateApiKeysRequest.java new file mode 100644 index 000000000..5b4d2035c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/BulkUpdateApiKeysRequest.java @@ -0,0 +1,449 @@ +/* + * Licensed to Elasticsearch B.V. 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: security.bulk_update_api_keys.Request + +/** + * Bulk update API keys. Update the attributes for multiple API keys. + *

    + * IMPORTANT: It is not possible to use an API key as the authentication + * credential for this API. To update API keys, the owner user's credentials are + * required. + *

    + * This API is similar to the update API key API but enables you to apply the + * same update to multiple API keys in one API call. This operation can greatly + * improve performance over making individual updates. + *

    + * It is not possible to update expired or invalidated API keys. + *

    + * This API supports updates to API key access scope, metadata and expiration. + * The access scope of each API key is derived from the + * role_descriptors you specify in the request and a snapshot of + * the owner user's permissions at the time of the request. The snapshot of the + * owner's permissions is updated automatically on every call. + *

    + * IMPORTANT: If you don't specify role_descriptors in the request, + * a call to this API might still change an API key's access scope. This change + * can occur if the owner user's permissions have changed since the API key was + * created or last modified. + *

    + * A successful request returns a JSON structure that contains the IDs of all + * updated API keys, the IDs of API keys that already had the requested changes + * and did not require an update, and error details for any failed update. + * + * @see API + * specification + */ +@JsonpDeserializable +public class BulkUpdateApiKeysRequest extends RequestBase implements JsonpSerializable { + @Nullable + private final Time expiration; + + private final List ids; + + private final Map metadata; + + private final Map roleDescriptors; + + // --------------------------------------------------------------------------------------------- + + private BulkUpdateApiKeysRequest(Builder builder) { + + this.expiration = builder.expiration; + this.ids = ApiTypeHelper.unmodifiableRequired(builder.ids, this, "ids"); + this.metadata = ApiTypeHelper.unmodifiable(builder.metadata); + this.roleDescriptors = ApiTypeHelper.unmodifiable(builder.roleDescriptors); + + } + + public static BulkUpdateApiKeysRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Expiration time for the API keys. By default, API keys never expire. This + * property can be omitted to leave the value unchanged. + *

    + * API name: {@code expiration} + */ + @Nullable + public final Time expiration() { + return this.expiration; + } + + /** + * Required - The API key identifiers. + *

    + * API name: {@code ids} + */ + public final List ids() { + return this.ids; + } + + /** + * Arbitrary nested metadata to associate with the API keys. Within the + * metadata object, top-level keys beginning with an underscore + * (_) are reserved for system usage. Any information specified + * with this parameter fully replaces metadata previously associated with the + * API key. + *

    + * API name: {@code metadata} + */ + public final Map metadata() { + return this.metadata; + } + + /** + * The role descriptors to assign to the API keys. An API key's effective + * permissions are an intersection of its assigned privileges and the + * point-in-time snapshot of permissions of the owner user. You can assign new + * privileges by specifying them in this parameter. To remove assigned + * privileges, supply the role_descriptors parameter as an empty + * object {}. If an API key has no assigned privileges, it inherits + * the owner user's full permissions. The snapshot of the owner's permissions is + * always updated, whether you supply the role_descriptors + * parameter. The structure of a role descriptor is the same as the request for + * the create API keys API. + *

    + * API name: {@code role_descriptors} + */ + public final Map roleDescriptors() { + return this.roleDescriptors; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.expiration != null) { + generator.writeKey("expiration"); + this.expiration.serialize(generator, mapper); + + } + if (ApiTypeHelper.isDefined(this.ids)) { + generator.writeKey("ids"); + generator.writeStartArray(); + for (String item0 : this.ids) { + generator.write(item0); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.metadata)) { + generator.writeKey("metadata"); + generator.writeStartObject(); + for (Map.Entry item0 : this.metadata.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.roleDescriptors)) { + generator.writeKey("role_descriptors"); + generator.writeStartObject(); + for (Map.Entry item0 : this.roleDescriptors.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link BulkUpdateApiKeysRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Time expiration; + + private List ids; + + @Nullable + private Map metadata; + + @Nullable + private Map roleDescriptors; + + /** + * Expiration time for the API keys. By default, API keys never expire. This + * property can be omitted to leave the value unchanged. + *

    + * API name: {@code expiration} + */ + public final Builder expiration(@Nullable Time value) { + this.expiration = value; + return this; + } + + /** + * Expiration time for the API keys. By default, API keys never expire. This + * property can be omitted to leave the value unchanged. + *

    + * API name: {@code expiration} + */ + public final Builder expiration(Function> fn) { + return this.expiration(fn.apply(new Time.Builder()).build()); + } + + /** + * Required - The API key identifiers. + *

    + * API name: {@code ids} + *

    + * Adds all elements of list to ids. + */ + public final Builder ids(List list) { + this.ids = _listAddAll(this.ids, list); + return this; + } + + /** + * Required - The API key identifiers. + *

    + * API name: {@code ids} + *

    + * Adds one or more values to ids. + */ + public final Builder ids(String value, String... values) { + this.ids = _listAdd(this.ids, value, values); + return this; + } + + /** + * Arbitrary nested metadata to associate with the API keys. Within the + * metadata object, top-level keys beginning with an underscore + * (_) are reserved for system usage. Any information specified + * with this parameter fully replaces metadata previously associated with the + * API key. + *

    + * API name: {@code metadata} + *

    + * Adds all entries of map to metadata. + */ + public final Builder metadata(Map map) { + this.metadata = _mapPutAll(this.metadata, map); + return this; + } + + /** + * Arbitrary nested metadata to associate with the API keys. Within the + * metadata object, top-level keys beginning with an underscore + * (_) are reserved for system usage. Any information specified + * with this parameter fully replaces metadata previously associated with the + * API key. + *

    + * API name: {@code metadata} + *

    + * Adds an entry to metadata. + */ + public final Builder metadata(String key, JsonData value) { + this.metadata = _mapPut(this.metadata, key, value); + return this; + } + + /** + * The role descriptors to assign to the API keys. An API key's effective + * permissions are an intersection of its assigned privileges and the + * point-in-time snapshot of permissions of the owner user. You can assign new + * privileges by specifying them in this parameter. To remove assigned + * privileges, supply the role_descriptors parameter as an empty + * object {}. If an API key has no assigned privileges, it inherits + * the owner user's full permissions. The snapshot of the owner's permissions is + * always updated, whether you supply the role_descriptors + * parameter. The structure of a role descriptor is the same as the request for + * the create API keys API. + *

    + * API name: {@code role_descriptors} + *

    + * Adds all entries of map to roleDescriptors. + */ + public final Builder roleDescriptors(Map map) { + this.roleDescriptors = _mapPutAll(this.roleDescriptors, map); + return this; + } + + /** + * The role descriptors to assign to the API keys. An API key's effective + * permissions are an intersection of its assigned privileges and the + * point-in-time snapshot of permissions of the owner user. You can assign new + * privileges by specifying them in this parameter. To remove assigned + * privileges, supply the role_descriptors parameter as an empty + * object {}. If an API key has no assigned privileges, it inherits + * the owner user's full permissions. The snapshot of the owner's permissions is + * always updated, whether you supply the role_descriptors + * parameter. The structure of a role descriptor is the same as the request for + * the create API keys API. + *

    + * API name: {@code role_descriptors} + *

    + * Adds an entry to roleDescriptors. + */ + public final Builder roleDescriptors(String key, RoleDescriptor value) { + this.roleDescriptors = _mapPut(this.roleDescriptors, key, value); + return this; + } + + /** + * The role descriptors to assign to the API keys. An API key's effective + * permissions are an intersection of its assigned privileges and the + * point-in-time snapshot of permissions of the owner user. You can assign new + * privileges by specifying them in this parameter. To remove assigned + * privileges, supply the role_descriptors parameter as an empty + * object {}. If an API key has no assigned privileges, it inherits + * the owner user's full permissions. The snapshot of the owner's permissions is + * always updated, whether you supply the role_descriptors + * parameter. The structure of a role descriptor is the same as the request for + * the create API keys API. + *

    + * API name: {@code role_descriptors} + *

    + * Adds an entry to roleDescriptors using a builder lambda. + */ + public final Builder roleDescriptors(String key, + Function> fn) { + return roleDescriptors(key, fn.apply(new RoleDescriptor.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link BulkUpdateApiKeysRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public BulkUpdateApiKeysRequest build() { + _checkSingleUse(); + + return new BulkUpdateApiKeysRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link BulkUpdateApiKeysRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, BulkUpdateApiKeysRequest::setupBulkUpdateApiKeysRequestDeserializer); + + protected static void setupBulkUpdateApiKeysRequestDeserializer( + ObjectDeserializer op) { + + op.add(Builder::expiration, Time._DESERIALIZER, "expiration"); + op.add(Builder::ids, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "ids"); + op.add(Builder::metadata, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "metadata"); + op.add(Builder::roleDescriptors, JsonpDeserializer.stringMapDeserializer(RoleDescriptor._DESERIALIZER), + "role_descriptors"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code security.bulk_update_api_keys}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/security.bulk_update_api_keys", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + return "/_security/api_key/_bulk_update"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, BulkUpdateApiKeysResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/BulkUpdateApiKeysResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/BulkUpdateApiKeysResponse.java new file mode 100644 index 000000000..fa4702754 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/BulkUpdateApiKeysResponse.java @@ -0,0 +1,258 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: security.bulk_update_api_keys.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class BulkUpdateApiKeysResponse implements JsonpSerializable { + @Nullable + private final BulkError errors; + + private final List noops; + + private final List updated; + + // --------------------------------------------------------------------------------------------- + + private BulkUpdateApiKeysResponse(Builder builder) { + + this.errors = builder.errors; + this.noops = ApiTypeHelper.unmodifiableRequired(builder.noops, this, "noops"); + this.updated = ApiTypeHelper.unmodifiableRequired(builder.updated, this, "updated"); + + } + + public static BulkUpdateApiKeysResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * API name: {@code errors} + */ + @Nullable + public final BulkError errors() { + return this.errors; + } + + /** + * Required - API name: {@code noops} + */ + public final List noops() { + return this.noops; + } + + /** + * Required - API name: {@code updated} + */ + public final List updated() { + return this.updated; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.errors != null) { + generator.writeKey("errors"); + this.errors.serialize(generator, mapper); + + } + if (ApiTypeHelper.isDefined(this.noops)) { + generator.writeKey("noops"); + generator.writeStartArray(); + for (String item0 : this.noops) { + generator.write(item0); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.updated)) { + generator.writeKey("updated"); + generator.writeStartArray(); + for (String item0 : this.updated) { + generator.write(item0); + + } + generator.writeEnd(); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link BulkUpdateApiKeysResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private BulkError errors; + + private List noops; + + private List updated; + + /** + * API name: {@code errors} + */ + public final Builder errors(@Nullable BulkError value) { + this.errors = value; + return this; + } + + /** + * API name: {@code errors} + */ + public final Builder errors(Function> fn) { + return this.errors(fn.apply(new BulkError.Builder()).build()); + } + + /** + * Required - API name: {@code noops} + *

    + * Adds all elements of list to noops. + */ + public final Builder noops(List list) { + this.noops = _listAddAll(this.noops, list); + return this; + } + + /** + * Required - API name: {@code noops} + *

    + * Adds one or more values to noops. + */ + public final Builder noops(String value, String... values) { + this.noops = _listAdd(this.noops, value, values); + return this; + } + + /** + * Required - API name: {@code updated} + *

    + * Adds all elements of list to updated. + */ + public final Builder updated(List list) { + this.updated = _listAddAll(this.updated, list); + return this; + } + + /** + * Required - API name: {@code updated} + *

    + * Adds one or more values to updated. + */ + public final Builder updated(String value, String... values) { + this.updated = _listAdd(this.updated, value, values); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link BulkUpdateApiKeysResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public BulkUpdateApiKeysResponse build() { + _checkSingleUse(); + + return new BulkUpdateApiKeysResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link BulkUpdateApiKeysResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, BulkUpdateApiKeysResponse::setupBulkUpdateApiKeysResponseDeserializer); + + protected static void setupBulkUpdateApiKeysResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::errors, BulkError._DESERIALIZER, "errors"); + op.add(Builder::noops, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "noops"); + op.add(Builder::updated, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), + "updated"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/DelegatePkiRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/DelegatePkiRequest.java new file mode 100644 index 000000000..e6a6238d1 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/DelegatePkiRequest.java @@ -0,0 +1,251 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: security.delegate_pki.Request + +/** + * Delegate PKI authentication. This API implements the exchange of an + * X509Certificate chain for an Elasticsearch access token. The certificate + * chain is validated, according to RFC 5280, by sequentially considering the + * trust configuration of every installed PKI realm that has + * delegation.enabled set to true. A successfully + * trusted client certificate is also subject to the validation of the subject + * distinguished name according to the username_pattern of the + * respective realm. + *

    + * This API is called by smart and trusted proxies, such as Kibana, which + * terminate the user's TLS session but still want to authenticate the user by + * using a PKI realm — as if the user connected directly to Elasticsearch. + *

    + * IMPORTANT: The association between the subject public key in the target + * certificate and the corresponding private key is not validated. This is part + * of the TLS authentication process and it is delegated to the proxy that calls + * this API. The proxy is trusted to have performed the TLS authentication and + * this API translates that authentication into an Elasticsearch access token. + * + * @see API + * specification + */ +@JsonpDeserializable +public class DelegatePkiRequest extends RequestBase implements JsonpSerializable { + private final List x509CertificateChain; + + // --------------------------------------------------------------------------------------------- + + private DelegatePkiRequest(Builder builder) { + + this.x509CertificateChain = ApiTypeHelper.unmodifiableRequired(builder.x509CertificateChain, this, + "x509CertificateChain"); + + } + + public static DelegatePkiRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The X509Certificate chain, which is represented as an ordered + * string array. Each string in the array is a base64-encoded (Section 4 of + * RFC4648 - not base64url-encoded) of the certificate's DER encoding. + *

    + * The first element is the target certificate that contains the subject + * distinguished name that is requesting access. This may be followed by + * additional certificates; each subsequent certificate is used to certify the + * previous one. + *

    + * API name: {@code x509_certificate_chain} + */ + public final List x509CertificateChain() { + return this.x509CertificateChain; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (ApiTypeHelper.isDefined(this.x509CertificateChain)) { + generator.writeKey("x509_certificate_chain"); + generator.writeStartArray(); + for (String item0 : this.x509CertificateChain) { + generator.write(item0); + + } + generator.writeEnd(); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DelegatePkiRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private List x509CertificateChain; + + /** + * Required - The X509Certificate chain, which is represented as an ordered + * string array. Each string in the array is a base64-encoded (Section 4 of + * RFC4648 - not base64url-encoded) of the certificate's DER encoding. + *

    + * The first element is the target certificate that contains the subject + * distinguished name that is requesting access. This may be followed by + * additional certificates; each subsequent certificate is used to certify the + * previous one. + *

    + * API name: {@code x509_certificate_chain} + *

    + * Adds all elements of list to x509CertificateChain. + */ + public final Builder x509CertificateChain(List list) { + this.x509CertificateChain = _listAddAll(this.x509CertificateChain, list); + return this; + } + + /** + * Required - The X509Certificate chain, which is represented as an ordered + * string array. Each string in the array is a base64-encoded (Section 4 of + * RFC4648 - not base64url-encoded) of the certificate's DER encoding. + *

    + * The first element is the target certificate that contains the subject + * distinguished name that is requesting access. This may be followed by + * additional certificates; each subsequent certificate is used to certify the + * previous one. + *

    + * API name: {@code x509_certificate_chain} + *

    + * Adds one or more values to x509CertificateChain. + */ + public final Builder x509CertificateChain(String value, String... values) { + this.x509CertificateChain = _listAdd(this.x509CertificateChain, value, values); + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DelegatePkiRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DelegatePkiRequest build() { + _checkSingleUse(); + + return new DelegatePkiRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DelegatePkiRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DelegatePkiRequest::setupDelegatePkiRequestDeserializer); + + protected static void setupDelegatePkiRequestDeserializer(ObjectDeserializer op) { + + op.add(Builder::x509CertificateChain, + JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "x509_certificate_chain"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code security.delegate_pki}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/security.delegate_pki", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + return "/_security/delegate_pki"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, DelegatePkiResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/DelegatePkiResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/DelegatePkiResponse.java new file mode 100644 index 000000000..84aab8f1d --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/DelegatePkiResponse.java @@ -0,0 +1,256 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.elasticsearch.security.delegate_pki.Authentication; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Long; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: security.delegate_pki.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DelegatePkiResponse implements JsonpSerializable { + private final String accessToken; + + private final long expiresIn; + + private final String type; + + @Nullable + private final Authentication authentication; + + // --------------------------------------------------------------------------------------------- + + private DelegatePkiResponse(Builder builder) { + + this.accessToken = ApiTypeHelper.requireNonNull(builder.accessToken, this, "accessToken"); + this.expiresIn = ApiTypeHelper.requireNonNull(builder.expiresIn, this, "expiresIn"); + this.type = ApiTypeHelper.requireNonNull(builder.type, this, "type"); + this.authentication = builder.authentication; + + } + + public static DelegatePkiResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - An access token associated with the subject distinguished name of + * the client's certificate. + *

    + * API name: {@code access_token} + */ + public final String accessToken() { + return this.accessToken; + } + + /** + * Required - The amount of time (in seconds) before the token expires. + *

    + * API name: {@code expires_in} + */ + public final long expiresIn() { + return this.expiresIn; + } + + /** + * Required - The type of token. + *

    + * API name: {@code type} + */ + public final String type() { + return this.type; + } + + /** + * API name: {@code authentication} + */ + @Nullable + public final Authentication authentication() { + return this.authentication; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("access_token"); + generator.write(this.accessToken); + + generator.writeKey("expires_in"); + generator.write(this.expiresIn); + + generator.writeKey("type"); + generator.write(this.type); + + if (this.authentication != null) { + generator.writeKey("authentication"); + this.authentication.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DelegatePkiResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String accessToken; + + private Long expiresIn; + + private String type; + + @Nullable + private Authentication authentication; + + /** + * Required - An access token associated with the subject distinguished name of + * the client's certificate. + *

    + * API name: {@code access_token} + */ + public final Builder accessToken(String value) { + this.accessToken = value; + return this; + } + + /** + * Required - The amount of time (in seconds) before the token expires. + *

    + * API name: {@code expires_in} + */ + public final Builder expiresIn(long value) { + this.expiresIn = value; + return this; + } + + /** + * Required - The type of token. + *

    + * API name: {@code type} + */ + public final Builder type(String value) { + this.type = value; + return this; + } + + /** + * API name: {@code authentication} + */ + public final Builder authentication(@Nullable Authentication value) { + this.authentication = value; + return this; + } + + /** + * API name: {@code authentication} + */ + public final Builder authentication(Function> fn) { + return this.authentication(fn.apply(new Authentication.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DelegatePkiResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DelegatePkiResponse build() { + _checkSingleUse(); + + return new DelegatePkiResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DelegatePkiResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DelegatePkiResponse::setupDelegatePkiResponseDeserializer); + + protected static void setupDelegatePkiResponseDeserializer(ObjectDeserializer op) { + + op.add(Builder::accessToken, JsonpDeserializer.stringDeserializer(), "access_token"); + op.add(Builder::expiresIn, JsonpDeserializer.longDeserializer(), "expires_in"); + op.add(Builder::type, JsonpDeserializer.stringDeserializer(), "type"); + op.add(Builder::authentication, Authentication._DESERIALIZER, "authentication"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java index 82e2b73e3..85486e21d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java +++ 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java @@ -210,6 +210,89 @@ public final CompletableFuture bulkPutRole( return bulkPutRole(fn.apply(new BulkPutRoleRequest.Builder()).build()); } + // ----- Endpoint: security.bulk_update_api_keys + + /** + * Bulk update API keys. Update the attributes for multiple API keys. + *

    + * IMPORTANT: It is not possible to use an API key as the authentication + * credential for this API. To update API keys, the owner user's credentials are + * required. + *

    + * This API is similar to the update API key API but enables you to apply the + * same update to multiple API keys in one API call. This operation can greatly + * improve performance over making individual updates. + *

    + * It is not possible to update expired or invalidated API keys. + *

    + * This API supports updates to API key access scope, metadata and expiration. + * The access scope of each API key is derived from the + * role_descriptors you specify in the request and a snapshot of + * the owner user's permissions at the time of the request. The snapshot of the + * owner's permissions is updated automatically on every call. + *

    + * IMPORTANT: If you don't specify role_descriptors in the request, + * a call to this API might still change an API key's access scope. This change + * can occur if the owner user's permissions have changed since the API key was + * created or last modified. + *

    + * A successful request returns a JSON structure that contains the IDs of all + * updated API keys, the IDs of API keys that already had the requested changes + * and did not require an update, and error details for any failed update. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture bulkUpdateApiKeys(BulkUpdateApiKeysRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) BulkUpdateApiKeysRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Bulk update API keys. Update the attributes for multiple API keys. + *

    + * IMPORTANT: It is not possible to use an API key as the authentication + * credential for this API. To update API keys, the owner user's credentials are + * required. + *

    + * This API is similar to the update API key API but enables you to apply the + * same update to multiple API keys in one API call. This operation can greatly + * improve performance over making individual updates. + *

    + * It is not possible to update expired or invalidated API keys. + *

    + * This API supports updates to API key access scope, metadata and expiration. + * The access scope of each API key is derived from the + * role_descriptors you specify in the request and a snapshot of + * the owner user's permissions at the time of the request. The snapshot of the + * owner's permissions is updated automatically on every call. + *

    + * IMPORTANT: If you don't specify role_descriptors in the request, + * a call to this API might still change an API key's access scope. This change + * can occur if the owner user's permissions have changed since the API key was + * created or last modified. + *

    + * A successful request returns a JSON structure that contains the IDs of all + * updated API keys, the IDs of API keys that already had the requested changes + * and did not require an update, and error details for any failed update. + * + * @param fn + * a function that initializes a builder to create the + * {@link BulkUpdateApiKeysRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture bulkUpdateApiKeys( + Function> fn) { + return bulkUpdateApiKeys(fn.apply(new BulkUpdateApiKeysRequest.Builder()).build()); + } + + // ----- Endpoint: security.delegate_pki + + /** + * Delegate PKI authentication. This API implements the exchange of an + * X509Certificate chain for an Elasticsearch access token. The certificate + * chain is validated, according to RFC 5280, by sequentially considering the + * trust configuration of every installed PKI realm that has + * delegation.enabled set to true. A successfully + * trusted client certificate is also subject to the validation of the subject + * distinguished name according to the username_pattern of the + * respective realm. + *

    + * This API is called by smart and trusted proxies, such as Kibana, which + * terminate the user's TLS session but still want to authenticate the user by + * using a PKI realm—as if the user connected directly to Elasticsearch. + *

    + * IMPORTANT: The association between the subject public key in the target + * certificate and the corresponding private key is not validated. This is part + * of the TLS authentication process and it is delegated to the proxy that calls + * this API. The proxy is trusted to have performed the TLS authentication and + * this API translates that authentication into an Elasticsearch access token. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture delegatePki(DelegatePkiRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) DelegatePkiRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Delegate PKI authentication. This API implements the exchange of an + * X509Certificate chain for an Elasticsearch access token. The certificate + * chain is validated, according to RFC 5280, by sequentially considering the + * trust configuration of every installed PKI realm that has + * delegation.enabled set to true. A successfully + * trusted client certificate is also subject to the validation of the subject + * distinguished name according to the username_pattern of the + * respective realm. + *

    + * This API is called by smart and trusted proxies, such as Kibana, which + * terminate the user's TLS session but still want to authenticate the user by + * using a PKI realm—as if the user connected directly to Elasticsearch. + *

    + * IMPORTANT: The association between the subject public key in the target + * certificate and the corresponding private key is not validated. This is part + * of the TLS authentication process and it is delegated to the proxy that calls + * this API. The proxy is trusted to have performed the TLS authentication and + * this API translates that authentication into an Elasticsearch access token. + * + * @param fn + * a function that initializes a builder to create the + * {@link DelegatePkiRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture delegatePki( + Function> fn) { + return delegatePki(fn.apply(new DelegatePkiRequest.Builder()).build()); + } + // ----- Endpoint: security.delete_privileges /** @@ -1861,6 +2011,190 @@ public CompletableFuture invalidateToken() { InvalidateTokenRequest._ENDPOINT, this.transportOptions); } + // ----- Endpoint: security.oidc_authenticate + + /** + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication + * response message for an Elasticsearch internal access token and refresh token + * that can be subsequently used for authentication. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture oidcAuthenticate(OidcAuthenticateRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) OidcAuthenticateRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication + * response message for an Elasticsearch internal access token and refresh token + * that can be subsequently used for authentication. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @param fn + * a function that initializes a builder to create the + * {@link OidcAuthenticateRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture oidcAuthenticate( + Function> fn) { + return oidcAuthenticate(fn.apply(new OidcAuthenticateRequest.Builder()).build()); + } + + // ----- Endpoint: security.oidc_logout + + /** + * Logout of OpenID Connect. Invalidate an access token and a refresh token that + * were generated as a response to the /_security/oidc/authenticate + * API. + *

    + * If the OpenID Connect authentication realm in Elasticsearch is accordingly + * configured, the response to this call will contain a URI pointing to the end + * session endpoint of the OpenID Connect Provider in order to perform single + * logout. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture oidcLogout(OidcLogoutRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) OidcLogoutRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Logout of OpenID Connect. Invalidate an access token and a refresh token that + * were generated as a response to the /_security/oidc/authenticate + * API. + *

    + * If the OpenID Connect authentication realm in Elasticsearch is accordingly + * configured, the response to this call will contain a URI pointing to the end + * session endpoint of the OpenID Connect Provider in order to perform single + * logout. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @param fn + * a function that initializes a builder to create the + * {@link OidcLogoutRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture oidcLogout( + Function> fn) { + return oidcLogout(fn.apply(new OidcLogoutRequest.Builder()).build()); + } + + // ----- Endpoint: security.oidc_prepare_authentication + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication + * request as a URL string based on the configuration of the OpenID Connect + * authentication realm in Elasticsearch. + *

    + * The response of this API is a URL pointing to the Authorization Endpoint of + * the configured OpenID Connect Provider, which can be used to redirect the + * browser of the user in order to continue the authentication process. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture oidcPrepareAuthentication( + OidcPrepareAuthenticationRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) OidcPrepareAuthenticationRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication + * request as a URL string based on the configuration of the OpenID Connect + * authentication realm in Elasticsearch. + *

    + * The response of this API is a URL pointing to the Authorization Endpoint of + * the configured OpenID Connect Provider, which can be used to redirect the + * browser of the user in order to continue the authentication process. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @param fn + * a function that initializes a builder to create the + * {@link OidcPrepareAuthenticationRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture oidcPrepareAuthentication( + Function> fn) { + return oidcPrepareAuthentication(fn.apply(new OidcPrepareAuthenticationRequest.Builder()).build()); + } + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication + * request as a URL string based on the configuration of the OpenID Connect + * authentication realm in Elasticsearch. + *

    + * The response of this API is a URL pointing to the Authorization Endpoint of + * the configured OpenID Connect Provider, which can be used to redirect the + * browser of the user in order to continue the authentication process. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture oidcPrepareAuthentication() { + return this.transport.performRequestAsync(new OidcPrepareAuthenticationRequest.Builder().build(), + OidcPrepareAuthenticationRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: security.put_privileges /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java index ae97689ae..0b8972bd4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java @@ -213,6 +213,91 @@ public final BulkPutRoleResponse bulkPutRole( return bulkPutRole(fn.apply(new BulkPutRoleRequest.Builder()).build()); } + // ----- Endpoint: security.bulk_update_api_keys + + /** + * Bulk update API keys. Update the attributes for multiple API keys. + *

    + * IMPORTANT: It is not possible to use an API key as the authentication + * credential for this API. To update API keys, the owner user's credentials are + * required. + *

    + * This API is similar to the update API key API but enables you to apply the + * same update to multiple API keys in one API call. This operation can greatly + * improve performance over making individual updates. + *

    + * It is not possible to update expired or invalidated API keys. + *

    + * This API supports updates to API key access scope, metadata and expiration. + * The access scope of each API key is derived from the + * role_descriptors you specify in the request and a snapshot of + * the owner user's permissions at the time of the request. The snapshot of the + * owner's permissions is updated automatically on every call. + *

    + * IMPORTANT: If you don't specify role_descriptors in the request, + * a call to this API might still change an API key's access scope. This change + * can occur if the owner user's permissions have changed since the API key was + * created or last modified. + *

    + * A successful request returns a JSON structure that contains the IDs of all + * updated API keys, the IDs of API keys that already had the requested changes + * and did not require an update, and error details for any failed update. + * + * @see Documentation + * on elastic.co + */ + + public BulkUpdateApiKeysResponse bulkUpdateApiKeys(BulkUpdateApiKeysRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) BulkUpdateApiKeysRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Bulk update API keys. Update the attributes for multiple API keys. + *

    + * IMPORTANT: It is not possible to use an API key as the authentication + * credential for this API. To update API keys, the owner user's credentials are + * required. + *

    + * This API is similar to the update API key API but enables you to apply the + * same update to multiple API keys in one API call. This operation can greatly + * improve performance over making individual updates. + *

    + * It is not possible to update expired or invalidated API keys. + *

    + * This API supports updates to API key access scope, metadata and expiration. + * The access scope of each API key is derived from the + * role_descriptors you specify in the request and a snapshot of + * the owner user's permissions at the time of the request. The snapshot of the + * owner's permissions is updated automatically on every call. + *

    + * IMPORTANT: If you don't specify role_descriptors in the request, + * a call to this API might still change an API key's access scope. This change + * can occur if the owner user's permissions have changed since the API key was + * created or last modified. + *

    + * A successful request returns a JSON structure that contains the IDs of all + * updated API keys, the IDs of API keys that already had the requested changes + * and did not require an update, and error details for any failed update. + * + * @param fn + * a function that initializes a builder to create the + * {@link BulkUpdateApiKeysRequest} + * @see Documentation + * on elastic.co + */ + + public final BulkUpdateApiKeysResponse bulkUpdateApiKeys( + Function> fn) + throws IOException, ElasticsearchException { + return bulkUpdateApiKeys(fn.apply(new BulkUpdateApiKeysRequest.Builder()).build()); + } + + // ----- Endpoint: security.delegate_pki + + /** + * Delegate PKI authentication. This API implements the exchange of an + * X509Certificate chain for an Elasticsearch access token. The certificate + * chain is validated, according to RFC 5280, by sequentially considering the + * trust configuration of every installed PKI realm that has + * delegation.enabled set to true. A successfully + * trusted client certificate is also subject to the validation of the subject + * distinguished name according to the username_pattern of the + * respective realm. + *

    + * This API is called by smart and trusted proxies, such as Kibana, which + * terminate the user's TLS session but still want to authenticate the user by + * using a PKI realm—as if the user connected directly to Elasticsearch. + *

    + * IMPORTANT: The association between the subject public key in the target + * certificate and the corresponding private key is not validated. This is part + * of the TLS authentication process and it is delegated to the proxy that calls + * this API. The proxy is trusted to have performed the TLS authentication and + * this API translates that authentication into an Elasticsearch access token. + * + * @see Documentation + * on elastic.co + */ + + public DelegatePkiResponse delegatePki(DelegatePkiRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) DelegatePkiRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Delegate PKI authentication. This API implements the exchange of an + * X509Certificate chain for an Elasticsearch access token. The certificate + * chain is validated, according to RFC 5280, by sequentially considering the + * trust configuration of every installed PKI realm that has + * delegation.enabled set to true. A successfully + * trusted client certificate is also subject to the validation of the subject + * distinguished name according to the username_pattern of the + * respective realm. + *

    + * This API is called by smart and trusted proxies, such as Kibana, which + * terminate the user's TLS session but still want to authenticate the user by + * using a PKI realm—as if the user connected directly to Elasticsearch. + *

    + * IMPORTANT: The association between the subject public key in the target + * certificate and the corresponding private key is not validated. This is part + * of the TLS authentication process and it is delegated to the proxy that calls + * this API. The proxy is trusted to have performed the TLS authentication and + * this API translates that authentication into an Elasticsearch access token. + * + * @param fn + * a function that initializes a builder to create the + * {@link DelegatePkiRequest} + * @see Documentation + * on elastic.co + */ + + public final DelegatePkiResponse delegatePki( + Function> fn) + throws IOException, ElasticsearchException { + return delegatePki(fn.apply(new DelegatePkiRequest.Builder()).build()); + } + // ----- Endpoint: security.delete_privileges /** @@ -1908,6 +2061,193 @@ public InvalidateTokenResponse invalidateToken() throws IOException, Elasticsear InvalidateTokenRequest._ENDPOINT, this.transportOptions); } + // ----- Endpoint: security.oidc_authenticate + + /** + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication + * response message for an Elasticsearch internal access token and refresh token + * that can be subsequently used for authentication. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public OidcAuthenticateResponse oidcAuthenticate(OidcAuthenticateRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) OidcAuthenticateRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication + * response message for an Elasticsearch internal access token and refresh token + * that can be subsequently used for authentication. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @param fn + * a function that initializes a builder to create the + * {@link OidcAuthenticateRequest} + * @see Documentation + * on elastic.co + */ + + public final OidcAuthenticateResponse oidcAuthenticate( + Function> fn) + throws IOException, ElasticsearchException { + return oidcAuthenticate(fn.apply(new OidcAuthenticateRequest.Builder()).build()); + } + + // ----- Endpoint: security.oidc_logout + + /** + * Logout of OpenID Connect. Invalidate an access token and a refresh token that + * were generated as a response to the /_security/oidc/authenticate + * API. + *

    + * If the OpenID Connect authentication realm in Elasticsearch is accordingly + * configured, the response to this call will contain a URI pointing to the end + * session endpoint of the OpenID Connect Provider in order to perform single + * logout. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public OidcLogoutResponse oidcLogout(OidcLogoutRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) OidcLogoutRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Logout of OpenID Connect. Invalidate an access token and a refresh token that + * were generated as a response to the /_security/oidc/authenticate + * API. + *

    + * If the OpenID Connect authentication realm in Elasticsearch is accordingly + * configured, the response to this call will contain a URI pointing to the end + * session endpoint of the OpenID Connect Provider in order to perform single + * logout. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @param fn + * a function that initializes a builder to create the + * {@link OidcLogoutRequest} + * @see Documentation + * on elastic.co + */ + + public final OidcLogoutResponse oidcLogout(Function> fn) + throws IOException, ElasticsearchException { + return oidcLogout(fn.apply(new OidcLogoutRequest.Builder()).build()); + } + + // ----- Endpoint: security.oidc_prepare_authentication + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication + * request as a URL string based on the configuration of the OpenID Connect + * authentication realm in Elasticsearch. + *

    + * The response of this API is a URL pointing to the Authorization Endpoint of + * the configured OpenID Connect Provider, which can be used to redirect the + * browser of the user in order to continue the authentication process. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public OidcPrepareAuthenticationResponse oidcPrepareAuthentication(OidcPrepareAuthenticationRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) OidcPrepareAuthenticationRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication + * request as a URL string based on the configuration of the OpenID Connect + * authentication realm in Elasticsearch. + *

    + * The response of this API is a URL pointing to the Authorization Endpoint of + * the configured OpenID Connect Provider, which can be used to redirect the + * browser of the user in order to continue the authentication process. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @param fn + * a function that initializes a builder to create the + * {@link OidcPrepareAuthenticationRequest} + * @see Documentation + * on elastic.co + */ + + public final OidcPrepareAuthenticationResponse oidcPrepareAuthentication( + Function> fn) + throws IOException, ElasticsearchException { + return oidcPrepareAuthentication(fn.apply(new OidcPrepareAuthenticationRequest.Builder()).build()); + } + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication + * request as a URL string based on the configuration of the OpenID Connect + * authentication realm in Elasticsearch. + *

    + * The response of this API is a URL pointing to the Authorization Endpoint of + * the configured OpenID Connect Provider, which can be used to redirect the + * browser of the user in order to continue the authentication process. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see Documentation + * on elastic.co + */ + + public OidcPrepareAuthenticationResponse oidcPrepareAuthentication() throws IOException, ElasticsearchException { + return this.transport.performRequest(new OidcPrepareAuthenticationRequest.Builder().build(), + OidcPrepareAuthenticationRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: security.put_privileges /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcAuthenticateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcAuthenticateRequest.java new file mode 100644 index 000000000..44109ca76 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcAuthenticateRequest.java @@ -0,0 +1,308 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: security.oidc_authenticate.Request + +/** + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication + * response message for an Elasticsearch internal access token and refresh token + * that can be subsequently used for authentication. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see API + * specification + */ +@JsonpDeserializable +public class OidcAuthenticateRequest extends RequestBase implements JsonpSerializable { + private final String nonce; + + @Nullable + private final String realm; + + private final String redirectUri; + + private final String state; + + // --------------------------------------------------------------------------------------------- + + private OidcAuthenticateRequest(Builder builder) { + + this.nonce = ApiTypeHelper.requireNonNull(builder.nonce, this, "nonce"); + this.realm = builder.realm; + this.redirectUri = ApiTypeHelper.requireNonNull(builder.redirectUri, this, "redirectUri"); + this.state = ApiTypeHelper.requireNonNull(builder.state, this, "state"); + + } + + public static OidcAuthenticateRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - Associate a client session with an ID token and mitigate replay + * attacks. This value needs to be the same as the one that was provided to the + * /_security/oidc/prepare API or the one that was generated by + * Elasticsearch and included in the response to that call. + *

    + * API name: {@code nonce} + */ + public final String nonce() { + return this.nonce; + } + + /** + * The name of the OpenID Connect realm. This property is useful in cases where + * multiple realms are defined. + *

    + * API name: {@code realm} + */ + @Nullable + public final String realm() { + return this.realm; + } + + /** + * Required - The URL to which the OpenID Connect Provider redirected the User + * Agent in response to an authentication request after a successful + * authentication. This URL must be provided as-is (URL encoded), taken from the + * body of the response or as the value of a location header in the response + * from the OpenID Connect Provider. + *

    + * API name: {@code redirect_uri} + */ + public final String redirectUri() { + return this.redirectUri; + } + + /** + * Required - Maintain state between the authentication request and the + * response. This value needs to be the same as the one that was provided to the + * /_security/oidc/prepare API or the one that was generated by + * Elasticsearch and included in the response to that call. + *

    + * API name: {@code state} + */ + public final String state() { + return this.state; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("nonce"); + generator.write(this.nonce); + + if (this.realm != null) { + generator.writeKey("realm"); + generator.write(this.realm); + + } + generator.writeKey("redirect_uri"); + generator.write(this.redirectUri); + + generator.writeKey("state"); + generator.write(this.state); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link OidcAuthenticateRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String nonce; + + @Nullable + private String realm; + + private String redirectUri; + + private String state; + + /** + * Required - Associate a client session with an ID token and mitigate replay + * attacks. This value needs to be the same as the one that was provided to the + * /_security/oidc/prepare API or the one that was generated by + * Elasticsearch and included in the response to that call. + *

    + * API name: {@code nonce} + */ + public final Builder nonce(String value) { + this.nonce = value; + return this; + } + + /** + * The name of the OpenID Connect realm. This property is useful in cases where + * multiple realms are defined. + *

    + * API name: {@code realm} + */ + public final Builder realm(@Nullable String value) { + this.realm = value; + return this; + } + + /** + * Required - The URL to which the OpenID Connect Provider redirected the User + * Agent in response to an authentication request after a successful + * authentication. This URL must be provided as-is (URL encoded), taken from the + * body of the response or as the value of a location header in the response + * from the OpenID Connect Provider. + *

    + * API name: {@code redirect_uri} + */ + public final Builder redirectUri(String value) { + this.redirectUri = value; + return this; + } + + /** + * Required - Maintain state between the authentication request and the + * response. This value needs to be the same as the one that was provided to the + * /_security/oidc/prepare API or the one that was generated by + * Elasticsearch and included in the response to that call. + *

    + * API name: {@code state} + */ + public final Builder state(String value) { + this.state = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link OidcAuthenticateRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public OidcAuthenticateRequest build() { + _checkSingleUse(); + + return new OidcAuthenticateRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link OidcAuthenticateRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, OidcAuthenticateRequest::setupOidcAuthenticateRequestDeserializer); + + protected static void setupOidcAuthenticateRequestDeserializer( + ObjectDeserializer op) { + + op.add(Builder::nonce, JsonpDeserializer.stringDeserializer(), "nonce"); + op.add(Builder::realm, JsonpDeserializer.stringDeserializer(), "realm"); + op.add(Builder::redirectUri, JsonpDeserializer.stringDeserializer(), "redirect_uri"); + op.add(Builder::state, JsonpDeserializer.stringDeserializer(), "state"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code security.oidc_authenticate}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/security.oidc_authenticate", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + return "/_security/oidc/authenticate"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, OidcAuthenticateResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcAuthenticateResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcAuthenticateResponse.java new file mode 100644 index 000000000..037b47deb --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcAuthenticateResponse.java @@ -0,0 +1,246 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: security.oidc_authenticate.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class OidcAuthenticateResponse implements JsonpSerializable { + private final String accessToken; + + private final int expiresIn; + + private final String refreshToken; + + private final String type; + + // --------------------------------------------------------------------------------------------- + + private OidcAuthenticateResponse(Builder builder) { + + this.accessToken = ApiTypeHelper.requireNonNull(builder.accessToken, this, "accessToken"); + this.expiresIn = ApiTypeHelper.requireNonNull(builder.expiresIn, this, "expiresIn"); + this.refreshToken = ApiTypeHelper.requireNonNull(builder.refreshToken, this, "refreshToken"); + this.type = ApiTypeHelper.requireNonNull(builder.type, this, "type"); + + } + + public static OidcAuthenticateResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The Elasticsearch access token. + *

    + * API name: {@code access_token} + */ + public final String accessToken() { + return this.accessToken; + } + + /** + * Required - The duration (in seconds) of the tokens. + *

    + * API name: {@code expires_in} + */ + public final int expiresIn() { + return this.expiresIn; + } + + /** + * Required - The Elasticsearch refresh token. + *

    + * API name: {@code refresh_token} + */ + public final String refreshToken() { + return this.refreshToken; + } + + /** + * Required - The type of token. + *

    + * API name: {@code type} + */ + public final String type() { + return this.type; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("access_token"); + generator.write(this.accessToken); + + generator.writeKey("expires_in"); + generator.write(this.expiresIn); + + generator.writeKey("refresh_token"); + generator.write(this.refreshToken); + + generator.writeKey("type"); + generator.write(this.type); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link OidcAuthenticateResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String accessToken; + + private Integer expiresIn; + + private String refreshToken; + + private String type; + + /** + * Required - The Elasticsearch access token. + *

    + * API name: {@code access_token} + */ + public final Builder accessToken(String value) { + this.accessToken = value; + return this; + } + + /** + * Required - The duration (in seconds) of the tokens. + *

    + * API name: {@code expires_in} + */ + public final Builder expiresIn(int value) { + this.expiresIn = value; + return this; + } + + /** + * Required - The Elasticsearch refresh token. + *

    + * API name: {@code refresh_token} + */ + public final Builder refreshToken(String value) { + this.refreshToken = value; + return this; + } + + /** + * Required - The type of token. + *

    + * API name: {@code type} + */ + public final Builder type(String value) { + this.type = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link OidcAuthenticateResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public OidcAuthenticateResponse build() { + _checkSingleUse(); + + return new OidcAuthenticateResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link OidcAuthenticateResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, OidcAuthenticateResponse::setupOidcAuthenticateResponseDeserializer); + + protected static void setupOidcAuthenticateResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::accessToken, JsonpDeserializer.stringDeserializer(), "access_token"); + op.add(Builder::expiresIn, JsonpDeserializer.integerDeserializer(), "expires_in"); + op.add(Builder::refreshToken, JsonpDeserializer.stringDeserializer(), "refresh_token"); + op.add(Builder::type, JsonpDeserializer.stringDeserializer(), "type"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcLogoutRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcLogoutRequest.java new file mode 100644 index 000000000..b95b7acbb --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcLogoutRequest.java @@ -0,0 +1,234 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: security.oidc_logout.Request + +/** + * Logout of OpenID Connect. Invalidate an access token and a refresh token that + * were generated as a response to the /_security/oidc/authenticate + * API. + *

    + * If the OpenID Connect authentication realm in Elasticsearch is accordingly + * configured, the response to this call will contain a URI pointing to the end + * session endpoint of the OpenID Connect Provider in order to perform single + * logout. + *

    + * Elasticsearch exposes all the necessary OpenID Connect related functionality + * with the OpenID Connect APIs. These APIs are used internally by Kibana in + * order to provide OpenID Connect based authentication, but can also be used by + * other, custom web applications or other clients. + * + * @see API + * specification + */ +@JsonpDeserializable +public class OidcLogoutRequest extends RequestBase implements JsonpSerializable { + private final String accessToken; + + @Nullable + private final String refreshToken; + + // --------------------------------------------------------------------------------------------- + + private OidcLogoutRequest(Builder builder) { + + this.accessToken = ApiTypeHelper.requireNonNull(builder.accessToken, this, "accessToken"); + this.refreshToken = builder.refreshToken; + + } + + public static OidcLogoutRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The access token to be invalidated. + *

    + * API name: {@code access_token} + */ + public final String accessToken() { + return this.accessToken; + } + + /** + * The refresh token to be invalidated. + *

    + * API name: {@code refresh_token} + */ + @Nullable + public final String refreshToken() { + return this.refreshToken; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("access_token"); + generator.write(this.accessToken); + + if (this.refreshToken != null) { + generator.writeKey("refresh_token"); + generator.write(this.refreshToken); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link OidcLogoutRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String accessToken; + + @Nullable + private String refreshToken; + + /** + * Required - The access token to be invalidated. + *

    + * API name: {@code access_token} + */ + public final Builder accessToken(String value) { + this.accessToken = value; + return this; + } + + /** + * The refresh token to be invalidated. + *

    + * API name: {@code refresh_token} + */ + public final Builder refreshToken(@Nullable String value) { + this.refreshToken = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link OidcLogoutRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public OidcLogoutRequest build() { + _checkSingleUse(); + + return new OidcLogoutRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link OidcLogoutRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, OidcLogoutRequest::setupOidcLogoutRequestDeserializer); + + protected static void setupOidcLogoutRequestDeserializer(ObjectDeserializer op) { + + op.add(Builder::accessToken, JsonpDeserializer.stringDeserializer(), "access_token"); + op.add(Builder::refreshToken, JsonpDeserializer.stringDeserializer(), "refresh_token"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code security.oidc_logout}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/security.oidc_logout", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + return "/_security/oidc/logout"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + return Collections.emptyMap(); + + }, SimpleEndpoint.emptyMap(), true, OidcLogoutResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcLogoutResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcLogoutResponse.java new file mode 100644 index 000000000..3b8380a1a --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcLogoutResponse.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: security.oidc_logout.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class OidcLogoutResponse implements JsonpSerializable { + private final String redirect; + + // --------------------------------------------------------------------------------------------- + + private OidcLogoutResponse(Builder builder) { + + this.redirect = ApiTypeHelper.requireNonNull(builder.redirect, this, "redirect"); + + } + + public static OidcLogoutResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - A URI that points to the end session endpoint of the OpenID + * Connect Provider with all the parameters of the logout request as HTTP GET + * parameters. + *

    + * API name: {@code redirect} + */ + public final String redirect() { + return this.redirect; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("redirect"); + generator.write(this.redirect); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link OidcLogoutResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String redirect; + + /** + * Required - A URI that points to the end session endpoint of the OpenID + * Connect Provider with all the parameters of the logout request as HTTP GET + * parameters. + *

    + * API name: {@code redirect} + */ + public final Builder redirect(String value) { + this.redirect = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link OidcLogoutResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public OidcLogoutResponse build() { + _checkSingleUse(); + + return new OidcLogoutResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link OidcLogoutResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, OidcLogoutResponse::setupOidcLogoutResponseDeserializer); + + protected static void setupOidcLogoutResponseDeserializer(ObjectDeserializer op) { + + op.add(Builder::redirect, JsonpDeserializer.stringDeserializer(), "redirect"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcPrepareAuthenticationRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcPrepareAuthenticationRequest.java new file mode 100644 index 000000000..391cdf477 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/OidcPrepareAuthenticationRequest.java @@ -0,0 +1,367 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Collections; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
//----------------------------------------------------------------

// typedef: security.oidc_prepare_authentication.Request

/**
 * Prepare OpenID Connect authentication. Create an OAuth 2.0 authentication
 * request as a URL string based on the configuration of the OpenID Connect
 * authentication realm in Elasticsearch.
 * <p>
 * The response of this API is a URL pointing to the Authorization Endpoint of
 * the configured OpenID Connect Provider, which can be used to redirect the
 * browser of the user in order to continue the authentication process.
 * <p>
 * Elasticsearch exposes all the necessary OpenID Connect related functionality
 * with the OpenID Connect APIs. These APIs are used internally by Kibana in
 * order to provide OpenID Connect based authentication, but can also be used by
 * other, custom web applications or other clients.
 * 
 * @see <a href=
 *      "../doc-files/api-spec.html#security.oidc_prepare_authentication.Request">API
 *      specification</a>
 */
@JsonpDeserializable
public class OidcPrepareAuthenticationRequest extends RequestBase implements JsonpSerializable {
	@Nullable
	private final String iss;

	@Nullable
	private final String loginHint;

	@Nullable
	private final String nonce;

	@Nullable
	private final String realm;

	@Nullable
	private final String state;

	// ---------------------------------------------------------------------------------------------

	private OidcPrepareAuthenticationRequest(Builder builder) {

		this.iss = builder.iss;
		this.loginHint = builder.loginHint;
		this.nonce = builder.nonce;
		this.realm = builder.realm;
		this.state = builder.state;

	}

	public static OidcPrepareAuthenticationRequest of(
			Function<Builder, ObjectBuilder<OidcPrepareAuthenticationRequest>> fn) {
		return fn.apply(new Builder()).build();
	}

	/**
	 * In the case of a third party initiated single sign on, this is the issuer
	 * identifier for the OP that the RP is to send the authentication request to.
	 * It cannot be specified when <code>realm</code> is specified. One of
	 * <code>realm</code> or <code>iss</code> is required.
	 * <p>
	 * API name: {@code iss}
	 */
	@Nullable
	public final String iss() {
		return this.iss;
	}

	/**
	 * In the case of a third party initiated single sign on, it is a string value
	 * that is included in the authentication request as the
	 * <code>login_hint</code> parameter. This parameter is not valid when
	 * <code>realm</code> is specified.
	 * <p>
	 * API name: {@code login_hint}
	 */
	@Nullable
	public final String loginHint() {
		return this.loginHint;
	}

	/**
	 * The value used to associate a client session with an ID token and to mitigate
	 * replay attacks. If the caller of the API does not provide a value,
	 * Elasticsearch will generate one with sufficient entropy and return it in the
	 * response.
	 * <p>
	 * API name: {@code nonce}
	 */
	@Nullable
	public final String nonce() {
		return this.nonce;
	}

	/**
	 * The name of the OpenID Connect realm in Elasticsearch the configuration of
	 * which should be used in order to generate the authentication request. It
	 * cannot be specified when <code>iss</code> is specified. One of
	 * <code>realm</code> or <code>iss</code> is required.
	 * <p>
	 * API name: {@code realm}
	 */
	@Nullable
	public final String realm() {
		return this.realm;
	}

	/**
	 * The value used to maintain state between the authentication request and the
	 * response, typically used as a Cross-Site Request Forgery mitigation. If the
	 * caller of the API does not provide a value, Elasticsearch will generate one
	 * with sufficient entropy and return it in the response.
	 * <p>
	 * API name: {@code state}
	 */
	@Nullable
	public final String state() {
		return this.state;
	}

	/**
	 * Serialize this object to JSON.
	 */
	public void serialize(JsonGenerator generator, JsonpMapper mapper) {
		generator.writeStartObject();
		serializeInternal(generator, mapper);
		generator.writeEnd();
	}

	// Only non-null properties are emitted: every request field is optional.
	protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) {

		if (this.iss != null) {
			generator.writeKey("iss");
			generator.write(this.iss);

		}
		if (this.loginHint != null) {
			generator.writeKey("login_hint");
			generator.write(this.loginHint);

		}
		if (this.nonce != null) {
			generator.writeKey("nonce");
			generator.write(this.nonce);

		}
		if (this.realm != null) {
			generator.writeKey("realm");
			generator.write(this.realm);

		}
		if (this.state != null) {
			generator.writeKey("state");
			generator.write(this.state);

		}

	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Builder for {@link OidcPrepareAuthenticationRequest}.
	 */

	public static class Builder extends RequestBase.AbstractBuilder<Builder>
			implements
				ObjectBuilder<OidcPrepareAuthenticationRequest> {
		@Nullable
		private String iss;

		@Nullable
		private String loginHint;

		@Nullable
		private String nonce;

		@Nullable
		private String realm;

		@Nullable
		private String state;

		/**
		 * In the case of a third party initiated single sign on, this is the issuer
		 * identifier for the OP that the RP is to send the authentication request to.
		 * It cannot be specified when <code>realm</code> is specified. One of
		 * <code>realm</code> or <code>iss</code> is required.
		 * <p>
		 * API name: {@code iss}
		 */
		public final Builder iss(@Nullable String value) {
			this.iss = value;
			return this;
		}

		/**
		 * In the case of a third party initiated single sign on, it is a string value
		 * that is included in the authentication request as the
		 * <code>login_hint</code> parameter. This parameter is not valid when
		 * <code>realm</code> is specified.
		 * <p>
		 * API name: {@code login_hint}
		 */
		public final Builder loginHint(@Nullable String value) {
			this.loginHint = value;
			return this;
		}

		/**
		 * The value used to associate a client session with an ID token and to mitigate
		 * replay attacks. If the caller of the API does not provide a value,
		 * Elasticsearch will generate one with sufficient entropy and return it in the
		 * response.
		 * <p>
		 * API name: {@code nonce}
		 */
		public final Builder nonce(@Nullable String value) {
			this.nonce = value;
			return this;
		}

		/**
		 * The name of the OpenID Connect realm in Elasticsearch the configuration of
		 * which should be used in order to generate the authentication request. It
		 * cannot be specified when <code>iss</code> is specified. One of
		 * <code>realm</code> or <code>iss</code> is required.
		 * <p>
		 * API name: {@code realm}
		 */
		public final Builder realm(@Nullable String value) {
			this.realm = value;
			return this;
		}

		/**
		 * The value used to maintain state between the authentication request and the
		 * response, typically used as a Cross-Site Request Forgery mitigation. If the
		 * caller of the API does not provide a value, Elasticsearch will generate one
		 * with sufficient entropy and return it in the response.
		 * <p>
		 * API name: {@code state}
		 */
		public final Builder state(@Nullable String value) {
			this.state = value;
			return this;
		}

		@Override
		protected Builder self() {
			return this;
		}

		/**
		 * Builds a {@link OidcPrepareAuthenticationRequest}.
		 *
		 * @throws NullPointerException
		 *             if some of the required fields are null.
		 */
		public OidcPrepareAuthenticationRequest build() {
			_checkSingleUse();

			return new OidcPrepareAuthenticationRequest(this);
		}
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Json deserializer for {@link OidcPrepareAuthenticationRequest}
	 */
	public static final JsonpDeserializer<OidcPrepareAuthenticationRequest> _DESERIALIZER = ObjectBuilderDeserializer
			.lazy(Builder::new, OidcPrepareAuthenticationRequest::setupOidcPrepareAuthenticationRequestDeserializer);

	protected static void setupOidcPrepareAuthenticationRequestDeserializer(
			ObjectDeserializer<OidcPrepareAuthenticationRequest.Builder> op) {

		op.add(Builder::iss, JsonpDeserializer.stringDeserializer(), "iss");
		op.add(Builder::loginHint, JsonpDeserializer.stringDeserializer(), "login_hint");
		op.add(Builder::nonce, JsonpDeserializer.stringDeserializer(), "nonce");
		op.add(Builder::realm, JsonpDeserializer.stringDeserializer(), "realm");
		op.add(Builder::state, JsonpDeserializer.stringDeserializer(), "state");

	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Endpoint "{@code security.oidc_prepare_authentication}".
	 */
	public static final Endpoint<OidcPrepareAuthenticationRequest, OidcPrepareAuthenticationResponse, ErrorResponse> _ENDPOINT = new SimpleEndpoint<>(
			"es/security.oidc_prepare_authentication",

			// Request method
			request -> {
				return "POST";

			},

			// Request path
			request -> {
				return "/_security/oidc/prepare";

			},

			// Path parameters
			request -> {
				return Collections.emptyMap();
			},

			// Request parameters
			request -> {
				return Collections.emptyMap();

			}, SimpleEndpoint.emptyMap(), true, OidcPrepareAuthenticationResponse._DESERIALIZER);
}
+ */ + +package co.elastic.clients.elasticsearch.security; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
//----------------------------------------------------------------

// typedef: security.oidc_prepare_authentication.Response

/**
 * Response for the {@code security.oidc_prepare_authentication} API: carries
 * the generated (or echoed) {@code nonce} and {@code state} values, the realm
 * that produced the request, and the redirect URL for the OpenID Connect
 * Provider's authorization endpoint.
 * 
 * @see <a href=
 *      "../doc-files/api-spec.html#security.oidc_prepare_authentication.Response">API
 *      specification</a>
 */
@JsonpDeserializable
public class OidcPrepareAuthenticationResponse implements JsonpSerializable {
	private final String nonce;

	private final String realm;

	private final String redirect;

	private final String state;

	// ---------------------------------------------------------------------------------------------

	private OidcPrepareAuthenticationResponse(Builder builder) {

		this.nonce = ApiTypeHelper.requireNonNull(builder.nonce, this, "nonce");
		this.realm = ApiTypeHelper.requireNonNull(builder.realm, this, "realm");
		this.redirect = ApiTypeHelper.requireNonNull(builder.redirect, this, "redirect");
		this.state = ApiTypeHelper.requireNonNull(builder.state, this, "state");

	}

	public static OidcPrepareAuthenticationResponse of(
			Function<Builder, ObjectBuilder<OidcPrepareAuthenticationResponse>> fn) {
		return fn.apply(new Builder()).build();
	}

	/**
	 * Required - API name: {@code nonce}
	 */
	public final String nonce() {
		return this.nonce;
	}

	/**
	 * Required - API name: {@code realm}
	 */
	public final String realm() {
		return this.realm;
	}

	/**
	 * Required - A URI that points to the authorization endpoint of the OpenID
	 * Connect Provider with all the parameters of the authentication request as
	 * HTTP GET parameters.
	 * <p>
	 * API name: {@code redirect}
	 */
	public final String redirect() {
		return this.redirect;
	}

	/**
	 * Required - API name: {@code state}
	 */
	public final String state() {
		return this.state;
	}

	/**
	 * Serialize this object to JSON.
	 */
	public void serialize(JsonGenerator generator, JsonpMapper mapper) {
		generator.writeStartObject();
		serializeInternal(generator, mapper);
		generator.writeEnd();
	}

	protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) {

		generator.writeKey("nonce");
		generator.write(this.nonce);

		generator.writeKey("realm");
		generator.write(this.realm);

		generator.writeKey("redirect");
		generator.write(this.redirect);

		generator.writeKey("state");
		generator.write(this.state);

	}

	@Override
	public String toString() {
		return JsonpUtils.toString(this);
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Builder for {@link OidcPrepareAuthenticationResponse}.
	 */

	public static class Builder extends WithJsonObjectBuilderBase<Builder>
			implements
				ObjectBuilder<OidcPrepareAuthenticationResponse> {
		private String nonce;

		private String realm;

		private String redirect;

		private String state;

		/**
		 * Required - API name: {@code nonce}
		 */
		public final Builder nonce(String value) {
			this.nonce = value;
			return this;
		}

		/**
		 * Required - API name: {@code realm}
		 */
		public final Builder realm(String value) {
			this.realm = value;
			return this;
		}

		/**
		 * Required - A URI that points to the authorization endpoint of the OpenID
		 * Connect Provider with all the parameters of the authentication request as
		 * HTTP GET parameters.
		 * <p>
		 * API name: {@code redirect}
		 */
		public final Builder redirect(String value) {
			this.redirect = value;
			return this;
		}

		/**
		 * Required - API name: {@code state}
		 */
		public final Builder state(String value) {
			this.state = value;
			return this;
		}

		@Override
		protected Builder self() {
			return this;
		}

		/**
		 * Builds a {@link OidcPrepareAuthenticationResponse}.
		 *
		 * @throws NullPointerException
		 *             if some of the required fields are null.
		 */
		public OidcPrepareAuthenticationResponse build() {
			_checkSingleUse();

			return new OidcPrepareAuthenticationResponse(this);
		}
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Json deserializer for {@link OidcPrepareAuthenticationResponse}
	 */
	public static final JsonpDeserializer<OidcPrepareAuthenticationResponse> _DESERIALIZER = ObjectBuilderDeserializer
			.lazy(Builder::new, OidcPrepareAuthenticationResponse::setupOidcPrepareAuthenticationResponseDeserializer);

	protected static void setupOidcPrepareAuthenticationResponseDeserializer(
			ObjectDeserializer<OidcPrepareAuthenticationResponse.Builder> op) {

		op.add(Builder::nonce, JsonpDeserializer.stringDeserializer(), "nonce");
		op.add(Builder::realm, JsonpDeserializer.stringDeserializer(), "realm");
		op.add(Builder::redirect, JsonpDeserializer.stringDeserializer(), "redirect");
		op.add(Builder::state, JsonpDeserializer.stringDeserializer(), "state");

	}

}
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.security.delegate_pki; + +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
//----------------------------------------------------------------

// typedef: security.delegate_pki.Authentication

/**
 * Details about the authenticated user, as returned by the delegate PKI
 * authentication API: user identity, roles, metadata, enabled flag, the realms
 * involved in authentication and lookup, and optional token/API-key maps.
 * 
 * @see <a href=
 *      "../doc-files/api-spec.html#security.delegate_pki.Authentication">API
 *      specification</a>
 */
@JsonpDeserializable
public class Authentication implements JsonpSerializable {
	private final String username;

	private final List<String> roles;

	@Nullable
	private final String fullName;

	@Nullable
	private final String email;

	private final Map<String, String> token;

	private final Map<String, JsonData> metadata;

	private final boolean enabled;

	private final AuthenticationRealm authenticationRealm;

	private final AuthenticationRealm lookupRealm;

	private final String authenticationType;

	private final Map<String, String> apiKey;

	// ---------------------------------------------------------------------------------------------

	private Authentication(Builder builder) {

		this.username = ApiTypeHelper.requireNonNull(builder.username, this, "username");
		this.roles = ApiTypeHelper.unmodifiableRequired(builder.roles, this, "roles");
		this.fullName = builder.fullName;
		this.email = builder.email;
		this.token = ApiTypeHelper.unmodifiable(builder.token);
		this.metadata = ApiTypeHelper.unmodifiableRequired(builder.metadata, this, "metadata");
		this.enabled = ApiTypeHelper.requireNonNull(builder.enabled, this, "enabled");
		this.authenticationRealm = ApiTypeHelper.requireNonNull(builder.authenticationRealm, this,
				"authenticationRealm");
		this.lookupRealm = ApiTypeHelper.requireNonNull(builder.lookupRealm, this, "lookupRealm");
		this.authenticationType = ApiTypeHelper.requireNonNull(builder.authenticationType, this, "authenticationType");
		this.apiKey = ApiTypeHelper.unmodifiable(builder.apiKey);

	}

	public static Authentication of(Function<Builder, ObjectBuilder<Authentication>> fn) {
		return fn.apply(new Builder()).build();
	}

	/**
	 * Required - API name: {@code username}
	 */
	public final String username() {
		return this.username;
	}

	/**
	 * Required - API name: {@code roles}
	 */
	public final List<String> roles() {
		return this.roles;
	}

	/**
	 * API name: {@code full_name}
	 */
	@Nullable
	public final String fullName() {
		return this.fullName;
	}

	/**
	 * API name: {@code email}
	 */
	@Nullable
	public final String email() {
		return this.email;
	}

	/**
	 * API name: {@code token}
	 */
	public final Map<String, String> token() {
		return this.token;
	}

	/**
	 * Required - API name: {@code metadata}
	 */
	public final Map<String, JsonData> metadata() {
		return this.metadata;
	}

	/**
	 * Required - API name: {@code enabled}
	 */
	public final boolean enabled() {
		return this.enabled;
	}

	/**
	 * Required - API name: {@code authentication_realm}
	 */
	public final AuthenticationRealm authenticationRealm() {
		return this.authenticationRealm;
	}

	/**
	 * Required - API name: {@code lookup_realm}
	 */
	public final AuthenticationRealm lookupRealm() {
		return this.lookupRealm;
	}

	/**
	 * Required - API name: {@code authentication_type}
	 */
	public final String authenticationType() {
		return this.authenticationType;
	}

	/**
	 * API name: {@code api_key}
	 */
	public final Map<String, String> apiKey() {
		return this.apiKey;
	}

	/**
	 * Serialize this object to JSON.
	 */
	public void serialize(JsonGenerator generator, JsonpMapper mapper) {
		generator.writeStartObject();
		serializeInternal(generator, mapper);
		generator.writeEnd();
	}

	protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) {

		generator.writeKey("username");
		generator.write(this.username);

		if (ApiTypeHelper.isDefined(this.roles)) {
			generator.writeKey("roles");
			generator.writeStartArray();
			for (String item0 : this.roles) {
				generator.write(item0);

			}
			generator.writeEnd();

		}
		if (this.fullName != null) {
			generator.writeKey("full_name");
			generator.write(this.fullName);

		}
		if (this.email != null) {
			generator.writeKey("email");
			generator.write(this.email);

		}
		if (ApiTypeHelper.isDefined(this.token)) {
			generator.writeKey("token");
			generator.writeStartObject();
			for (Map.Entry<String, String> item0 : this.token.entrySet()) {
				generator.writeKey(item0.getKey());
				generator.write(item0.getValue());

			}
			generator.writeEnd();

		}
		if (ApiTypeHelper.isDefined(this.metadata)) {
			generator.writeKey("metadata");
			generator.writeStartObject();
			for (Map.Entry<String, JsonData> item0 : this.metadata.entrySet()) {
				generator.writeKey(item0.getKey());
				item0.getValue().serialize(generator, mapper);

			}
			generator.writeEnd();

		}
		generator.writeKey("enabled");
		generator.write(this.enabled);

		generator.writeKey("authentication_realm");
		this.authenticationRealm.serialize(generator, mapper);

		generator.writeKey("lookup_realm");
		this.lookupRealm.serialize(generator, mapper);

		generator.writeKey("authentication_type");
		generator.write(this.authenticationType);

		if (ApiTypeHelper.isDefined(this.apiKey)) {
			generator.writeKey("api_key");
			generator.writeStartObject();
			for (Map.Entry<String, String> item0 : this.apiKey.entrySet()) {
				generator.writeKey(item0.getKey());
				generator.write(item0.getValue());

			}
			generator.writeEnd();

		}

	}

	@Override
	public String toString() {
		return JsonpUtils.toString(this);
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Builder for {@link Authentication}.
	 */

	public static class Builder extends WithJsonObjectBuilderBase<Builder> implements ObjectBuilder<Authentication> {
		private String username;

		private List<String> roles;

		@Nullable
		private String fullName;

		@Nullable
		private String email;

		@Nullable
		private Map<String, String> token;

		private Map<String, JsonData> metadata;

		private Boolean enabled;

		private AuthenticationRealm authenticationRealm;

		private AuthenticationRealm lookupRealm;

		private String authenticationType;

		@Nullable
		private Map<String, String> apiKey;

		/**
		 * Required - API name: {@code username}
		 */
		public final Builder username(String value) {
			this.username = value;
			return this;
		}

		/**
		 * Required - API name: {@code roles}
		 * <p>
		 * Adds all elements of <code>list</code> to <code>roles</code>.
		 */
		public final Builder roles(List<String> list) {
			this.roles = _listAddAll(this.roles, list);
			return this;
		}

		/**
		 * Required - API name: {@code roles}
		 * <p>
		 * Adds one or more values to <code>roles</code>.
		 */
		public final Builder roles(String value, String... values) {
			this.roles = _listAdd(this.roles, value, values);
			return this;
		}

		/**
		 * API name: {@code full_name}
		 */
		public final Builder fullName(@Nullable String value) {
			this.fullName = value;
			return this;
		}

		/**
		 * API name: {@code email}
		 */
		public final Builder email(@Nullable String value) {
			this.email = value;
			return this;
		}

		/**
		 * API name: {@code token}
		 * <p>
		 * Adds all entries of <code>map</code> to <code>token</code>.
		 */
		public final Builder token(Map<String, String> map) {
			this.token = _mapPutAll(this.token, map);
			return this;
		}

		/**
		 * API name: {@code token}
		 * <p>
		 * Adds an entry to <code>token</code>.
		 */
		public final Builder token(String key, String value) {
			this.token = _mapPut(this.token, key, value);
			return this;
		}

		/**
		 * Required - API name: {@code metadata}
		 * <p>
		 * Adds all entries of <code>map</code> to <code>metadata</code>.
		 */
		public final Builder metadata(Map<String, JsonData> map) {
			this.metadata = _mapPutAll(this.metadata, map);
			return this;
		}

		/**
		 * Required - API name: {@code metadata}
		 * <p>
		 * Adds an entry to <code>metadata</code>.
		 */
		public final Builder metadata(String key, JsonData value) {
			this.metadata = _mapPut(this.metadata, key, value);
			return this;
		}

		/**
		 * Required - API name: {@code enabled}
		 */
		public final Builder enabled(boolean value) {
			this.enabled = value;
			return this;
		}

		/**
		 * Required - API name: {@code authentication_realm}
		 */
		public final Builder authenticationRealm(AuthenticationRealm value) {
			this.authenticationRealm = value;
			return this;
		}

		/**
		 * Required - API name: {@code authentication_realm}
		 */
		public final Builder authenticationRealm(
				Function<AuthenticationRealm.Builder, ObjectBuilder<AuthenticationRealm>> fn) {
			return this.authenticationRealm(fn.apply(new AuthenticationRealm.Builder()).build());
		}

		/**
		 * Required - API name: {@code lookup_realm}
		 */
		public final Builder lookupRealm(AuthenticationRealm value) {
			this.lookupRealm = value;
			return this;
		}

		/**
		 * Required - API name: {@code lookup_realm}
		 */
		public final Builder lookupRealm(Function<AuthenticationRealm.Builder, ObjectBuilder<AuthenticationRealm>> fn) {
			return this.lookupRealm(fn.apply(new AuthenticationRealm.Builder()).build());
		}

		/**
		 * Required - API name: {@code authentication_type}
		 */
		public final Builder authenticationType(String value) {
			this.authenticationType = value;
			return this;
		}

		/**
		 * API name: {@code api_key}
		 * <p>
		 * Adds all entries of <code>map</code> to <code>apiKey</code>.
		 */
		public final Builder apiKey(Map<String, String> map) {
			this.apiKey = _mapPutAll(this.apiKey, map);
			return this;
		}

		/**
		 * API name: {@code api_key}
		 * <p>
		 * Adds an entry to <code>apiKey</code>.
		 */
		public final Builder apiKey(String key, String value) {
			this.apiKey = _mapPut(this.apiKey, key, value);
			return this;
		}

		@Override
		protected Builder self() {
			return this;
		}

		/**
		 * Builds a {@link Authentication}.
		 *
		 * @throws NullPointerException
		 *             if some of the required fields are null.
		 */
		public Authentication build() {
			_checkSingleUse();

			return new Authentication(this);
		}
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Json deserializer for {@link Authentication}
	 */
	public static final JsonpDeserializer<Authentication> _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new,
			Authentication::setupAuthenticationDeserializer);

	protected static void setupAuthenticationDeserializer(ObjectDeserializer<Authentication.Builder> op) {

		op.add(Builder::username, JsonpDeserializer.stringDeserializer(), "username");
		op.add(Builder::roles, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "roles");
		op.add(Builder::fullName, JsonpDeserializer.stringDeserializer(), "full_name");
		op.add(Builder::email, JsonpDeserializer.stringDeserializer(), "email");
		op.add(Builder::token, JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.stringDeserializer()),
				"token");
		op.add(Builder::metadata, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "metadata");
		op.add(Builder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled");
		op.add(Builder::authenticationRealm, AuthenticationRealm._DESERIALIZER, "authentication_realm");
		op.add(Builder::lookupRealm, AuthenticationRealm._DESERIALIZER, "lookup_realm");
		op.add(Builder::authenticationType, JsonpDeserializer.stringDeserializer(), "authentication_type");
		op.add(Builder::apiKey, JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.stringDeserializer()),
				"api_key");

	}

}
a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/delegate_pki/AuthenticationRealm.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/delegate_pki/AuthenticationRealm.java new file mode 100644 index 000000000..5ea349fcf --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/delegate_pki/AuthenticationRealm.java @@ -0,0 +1,210 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.security.delegate_pki; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
//----------------------------------------------------------------

// typedef: security.delegate_pki.AuthenticationRealm

/**
 * Identifies a security realm by name and type (with an optional security
 * domain), as reported in delegate PKI authentication responses.
 * 
 * @see <a href=
 *      "../doc-files/api-spec.html#security.delegate_pki.AuthenticationRealm">API
 *      specification</a>
 */
@JsonpDeserializable
public class AuthenticationRealm implements JsonpSerializable {
	private final String name;

	private final String type;

	@Nullable
	private final String domain;

	// ---------------------------------------------------------------------------------------------

	private AuthenticationRealm(Builder builder) {

		this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name");
		this.type = ApiTypeHelper.requireNonNull(builder.type, this, "type");
		this.domain = builder.domain;

	}

	public static AuthenticationRealm of(Function<Builder, ObjectBuilder<AuthenticationRealm>> fn) {
		return fn.apply(new Builder()).build();
	}

	/**
	 * Required - API name: {@code name}
	 */
	public final String name() {
		return this.name;
	}

	/**
	 * Required - API name: {@code type}
	 */
	public final String type() {
		return this.type;
	}

	/**
	 * API name: {@code domain}
	 */
	@Nullable
	public final String domain() {
		return this.domain;
	}

	/**
	 * Serialize this object to JSON.
	 */
	public void serialize(JsonGenerator generator, JsonpMapper mapper) {
		generator.writeStartObject();
		serializeInternal(generator, mapper);
		generator.writeEnd();
	}

	protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) {

		generator.writeKey("name");
		generator.write(this.name);

		generator.writeKey("type");
		generator.write(this.type);

		if (this.domain != null) {
			generator.writeKey("domain");
			generator.write(this.domain);

		}

	}

	@Override
	public String toString() {
		return JsonpUtils.toString(this);
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Builder for {@link AuthenticationRealm}.
	 */

	public static class Builder extends WithJsonObjectBuilderBase<Builder>
			implements
				ObjectBuilder<AuthenticationRealm> {
		private String name;

		private String type;

		@Nullable
		private String domain;

		/**
		 * Required - API name: {@code name}
		 */
		public final Builder name(String value) {
			this.name = value;
			return this;
		}

		/**
		 * Required - API name: {@code type}
		 */
		public final Builder type(String value) {
			this.type = value;
			return this;
		}

		/**
		 * API name: {@code domain}
		 */
		public final Builder domain(@Nullable String value) {
			this.domain = value;
			return this;
		}

		@Override
		protected Builder self() {
			return this;
		}

		/**
		 * Builds a {@link AuthenticationRealm}.
		 *
		 * @throws NullPointerException
		 *             if some of the required fields are null.
		 */
		public AuthenticationRealm build() {
			_checkSingleUse();

			return new AuthenticationRealm(this);
		}
	}

	// ---------------------------------------------------------------------------------------------

	/**
	 * Json deserializer for {@link AuthenticationRealm}
	 */
	public static final JsonpDeserializer<AuthenticationRealm> _DESERIALIZER = ObjectBuilderDeserializer
			.lazy(Builder::new, AuthenticationRealm::setupAuthenticationRealmDeserializer);

	protected static void setupAuthenticationRealmDeserializer(ObjectDeserializer<AuthenticationRealm.Builder> op) {

		op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name");
		op.add(Builder::type, JsonpDeserializer.stringDeserializer(), "type");
		op.add(Builder::domain, JsonpDeserializer.stringDeserializer(), "domain");

	}

}
+56,17 @@ // typedef: shutdown.delete_node.Request /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java index e32836ca0..d07b6d838 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java @@ -70,11 +70,20 @@ public ElasticsearchShutdownAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: shutdown.delete_node /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html">Documentation * on elastic.co */ @@ -86,14 +95,23 @@ public CompletableFuture deleteNode(DeleteNodeRequest reques } /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the * {@link DeleteNodeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html">Documentation * on elastic.co */ @@ -105,12 +123,21 @@ public final CompletableFuture deleteNode( // ----- Endpoint: shutdown.get_node /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

    + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html">Documentation * on elastic.co */ @@ -122,15 +149,24 @@ public CompletableFuture getNode(GetNodeRequest request) { } /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

    + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the * {@link GetNodeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html">Documentation * on elastic.co */ @@ -140,12 +176,21 @@ public final CompletableFuture getNode( } /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

    + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html">Documentation * on elastic.co */ @@ -157,11 +202,32 @@ public CompletableFuture getNode() { // ----- Endpoint: shutdown.put_node /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If you specify a node that is offline, it will be prepared for shut down when + * it rejoins the cluster. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

    + * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

    + * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

    + * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html">Documentation * on elastic.co */ @@ -173,14 +239,35 @@ public CompletableFuture putNode(PutNodeRequest request) { } /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If you specify a node that is offline, it will be prepared for shut down when + * it rejoins the cluster. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

    + * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

    + * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

    + * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @param fn * a function that initializes a builder to create the * {@link PutNodeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java index d697c22ee..650393790 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java @@ -68,11 +68,20 @@ public ElasticsearchShutdownClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: shutdown.delete_node /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html">Documentation * on elastic.co */ @@ -84,14 +93,23 @@ public DeleteNodeResponse deleteNode(DeleteNodeRequest request) throws IOExcepti } /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the * {@link DeleteNodeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html">Documentation * on elastic.co */ @@ -103,12 +121,21 @@ public final DeleteNodeResponse deleteNode(Function + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html">Documentation * on elastic.co */ @@ -120,15 +147,24 @@ public GetNodeResponse getNode(GetNodeRequest request) throws IOException, Elast } /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

    + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the * {@link GetNodeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html">Documentation * on elastic.co */ @@ -138,12 +174,21 @@ public final GetNodeResponse getNode(Function + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html">Documentation * on elastic.co */ @@ -155,11 +200,32 @@ public GetNodeResponse getNode() throws IOException, ElasticsearchException { // ----- Endpoint: shutdown.put_node /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If you specify a node that is offline, it will be prepared for shut down when + * it rejoins the cluster. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

    + * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

    + * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

    + * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html">Documentation * on elastic.co */ @@ -171,14 +237,35 @@ public PutNodeResponse putNode(PutNodeRequest request) throws IOException, Elast } /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If you specify a node that is offline, it will be prepared for shut down when + * it rejoins the cluster. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

    + * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

    + * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

    + * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @param fn * a function that initializes a builder to create the * {@link PutNodeRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java index 42ee075c2..1c3dee599 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java @@ -58,9 +58,18 @@ // typedef: shutdown.get_node.Request /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

    + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

    + * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java index 0eaa0a97b..648d57de4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java @@ -58,8 +58,29 @@ // typedef: shutdown.put_node.Request /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

    + * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

    + * If you specify a node that is offline, it will be prepared for shut down when + * it rejoins the cluster. + *

    + * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

    + * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

    + * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

    + * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @see API * specification @@ -118,7 +139,7 @@ public final String allocationDelay() { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. *

    * API name: {@code master_timeout} @@ -129,7 +150,9 @@ public final TimeUnit masterTimeout() { } /** - * Required - The node id of node to be shut down + * Required - The node identifier. This parameter is not validated against the + * cluster's active nodes. This enables you to register a node for shut down + * while it is offline. No error is thrown if you specify an invalid node ID. *

    * API name: {@code node_id} */ @@ -163,8 +186,8 @@ public final String targetNodeName() { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. *

    * API name: {@code timeout} */ @@ -262,7 +285,7 @@ public final Builder allocationDelay(@Nullable String value) { } /** - * Period to wait for a connection to the master node. If no response is + * The period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error. *

    * API name: {@code master_timeout} @@ -273,7 +296,9 @@ public final Builder masterTimeout(@Nullable TimeUnit value) { } /** - * Required - The node id of node to be shut down + * Required - The node identifier. This parameter is not validated against the + * cluster's active nodes. This enables you to register a node for shut down + * while it is offline. No error is thrown if you specify an invalid node ID. *

    * API name: {@code node_id} */ @@ -309,8 +334,8 @@ public final Builder targetNodeName(@Nullable String value) { } /** - * Period to wait for a response. If no response is received before the timeout - * expires, the request fails and returns an error. + * The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. *

    * API name: {@code timeout} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java index ef16269fb..58620b660 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -31,7 +32,6 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.lang.String; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -56,20 +56,30 @@ // typedef: slm.delete_lifecycle.Request /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. 
* * @see API * specification */ public class DeleteLifecycleRequest extends RequestBase { + @Nullable + private final Time masterTimeout; + private final String policyId; + @Nullable + private final Time timeout; + // --------------------------------------------------------------------------------------------- private DeleteLifecycleRequest(Builder builder) { + this.masterTimeout = builder.masterTimeout; this.policyId = ApiTypeHelper.requireNonNull(builder.policyId, this, "policyId"); + this.timeout = builder.timeout; } @@ -77,6 +87,17 @@ public static DeleteLifecycleRequest of(Function + * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * Required - The id of the snapshot lifecycle policy to remove *

    @@ -86,6 +107,17 @@ public final String policyId() { return this.policyId; } + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -95,8 +127,35 @@ public final String policyId() { public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time masterTimeout; + private String policyId; + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * Required - The id of the snapshot lifecycle policy to remove *

    @@ -107,6 +166,27 @@ public final Builder policyId(String value) { return this; } + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -176,7 +256,14 @@ public DeleteLifecycleRequest build() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, DeleteLifecycleResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java index 43a51bc51..fee52235c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java @@ -67,7 +67,9 @@ public ElasticsearchSlmAsyncClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: slm.delete_lifecycle /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @see Documentation @@ -82,7 +84,9 @@ public CompletableFuture deleteLifecycle(DeleteLifecycl } /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. 
* * @param fn * a function that initializes a builder to create the @@ -100,8 +104,10 @@ public final CompletableFuture deleteLifecycle( // ----- Endpoint: slm.execute_lifecycle /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @see Documentation @@ -116,8 +122,10 @@ public CompletableFuture executeLifecycle(ExecuteLifec } /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @param fn * a function that initializes a builder to create the @@ -135,23 +143,63 @@ public final CompletableFuture executeLifecycle( // ----- Endpoint: slm.execute_retention /** - * Deletes any snapshots that are expired according to the policy's retention - * rules. + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. 
* * @see Documentation * on elastic.co */ + + public CompletableFuture executeRetention(ExecuteRetentionRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) ExecuteRetentionRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. + * + * @param fn + * a function that initializes a builder to create the + * {@link ExecuteRetentionRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture executeRetention( + Function> fn) { + return executeRetention(fn.apply(new ExecuteRetentionRequest.Builder()).build()); + } + + /** + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. + * + * @see Documentation + * on elastic.co + */ + public CompletableFuture executeRetention() { - return this.transport.performRequestAsync(ExecuteRetentionRequest._INSTANCE, ExecuteRetentionRequest._ENDPOINT, - this.transportOptions); + return this.transport.performRequestAsync(new ExecuteRetentionRequest.Builder().build(), + ExecuteRetentionRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.get_lifecycle /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. 
* * @see Documentation @@ -166,8 +214,8 @@ public CompletableFuture getLifecycle(GetLifecycleRequest } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @param fn * a function that initializes a builder to create the @@ -183,8 +231,8 @@ public final CompletableFuture getLifecycle( } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see Documentation @@ -199,36 +247,104 @@ public CompletableFuture getLifecycle() { // ----- Endpoint: slm.get_stats /** - * Returns global and policy-level statistics about actions taken by snapshot - * lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. * * @see Documentation * on elastic.co */ + + public CompletableFuture getStats(GetStatsRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetStatsRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetStatsRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture getStats( + Function> fn) { + return getStats(fn.apply(new GetStatsRequest.Builder()).build()); + } + + /** + * Get snapshot lifecycle management statistics. 
Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. + * + * @see Documentation + * on elastic.co + */ + public CompletableFuture getStats() { - return this.transport.performRequestAsync(GetStatsRequest._INSTANCE, GetStatsRequest._ENDPOINT, + return this.transport.performRequestAsync(new GetStatsRequest.Builder().build(), GetStatsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.get_status /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture getStatus(GetSlmStatusRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetSlmStatusRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Get the snapshot lifecycle management status. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetSlmStatusRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture getStatus( + Function> fn) { + return getStatus(fn.apply(new GetSlmStatusRequest.Builder()).build()); + } + + /** + * Get the snapshot lifecycle management status. * * @see Documentation * on elastic.co */ + public CompletableFuture getStatus() { - return this.transport.performRequestAsync(GetSlmStatusRequest._INSTANCE, GetSlmStatusRequest._ENDPOINT, - this.transportOptions); + return this.transport.performRequestAsync(new GetSlmStatusRequest.Builder().build(), + GetSlmStatusRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.put_lifecycle /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. 
* * @see Documentation @@ -243,7 +359,9 @@ public CompletableFuture putLifecycle(PutLifecycleRequest } /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @param fn * a function that initializes a builder to create the @@ -261,28 +379,128 @@ public final CompletableFuture putLifecycle( // ----- Endpoint: slm.start /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. * * @see Documentation * on elastic.co */ + + public CompletableFuture start(StartSlmRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) StartSlmRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. + * + * @param fn + * a function that initializes a builder to create the + * {@link StartSlmRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture start( + Function> fn) { + return start(fn.apply(new StartSlmRequest.Builder()).build()); + } + + /** + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. 
+ * + * @see Documentation + * on elastic.co + */ + public CompletableFuture start() { - return this.transport.performRequestAsync(StartSlmRequest._INSTANCE, StartSlmRequest._ENDPOINT, + return this.transport.performRequestAsync(new StartSlmRequest.Builder().build(), StartSlmRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.stop /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture stop(StopSlmRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) StopSlmRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. * + * @param fn + * a function that initializes a builder to create the + * {@link StopSlmRequest} * @see Documentation * on elastic.co */ + + public final CompletableFuture stop( + Function> fn) { + return stop(fn.apply(new StopSlmRequest.Builder()).build()); + } + + /** + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. + * + * @see Documentation + * on elastic.co + */ + public CompletableFuture stop() { - return this.transport.performRequestAsync(StopSlmRequest._INSTANCE, StopSlmRequest._ENDPOINT, + return this.transport.performRequestAsync(new StopSlmRequest.Builder().build(), StopSlmRequest._ENDPOINT, this.transportOptions); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java index a6cd5188e..d0c1ad4b2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java @@ -68,7 +68,9 @@ public ElasticsearchSlmClient withTransportOptions(@Nullable TransportOptions tr // ----- Endpoint: slm.delete_lifecycle /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @see Documentation @@ -84,7 +86,9 @@ public DeleteLifecycleResponse deleteLifecycle(DeleteLifecycleRequest request) } /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. 
* * @param fn * a function that initializes a builder to create the @@ -103,8 +107,10 @@ public final DeleteLifecycleResponse deleteLifecycle( // ----- Endpoint: slm.execute_lifecycle /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @see Documentation @@ -120,8 +126,10 @@ public ExecuteLifecycleResponse executeLifecycle(ExecuteLifecycleRequest request } /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @param fn * a function that initializes a builder to create the @@ -140,23 +148,65 @@ public final ExecuteLifecycleResponse executeLifecycle( // ----- Endpoint: slm.execute_retention /** - * Deletes any snapshots that are expired according to the policy's retention - * rules. + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. 
* * @see Documentation * on elastic.co */ + + public ExecuteRetentionResponse executeRetention(ExecuteRetentionRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) ExecuteRetentionRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. + * + * @param fn + * a function that initializes a builder to create the + * {@link ExecuteRetentionRequest} + * @see Documentation + * on elastic.co + */ + + public final ExecuteRetentionResponse executeRetention( + Function> fn) + throws IOException, ElasticsearchException { + return executeRetention(fn.apply(new ExecuteRetentionRequest.Builder()).build()); + } + + /** + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. + * + * @see Documentation + * on elastic.co + */ + public ExecuteRetentionResponse executeRetention() throws IOException, ElasticsearchException { - return this.transport.performRequest(ExecuteRetentionRequest._INSTANCE, ExecuteRetentionRequest._ENDPOINT, - this.transportOptions); + return this.transport.performRequest(new ExecuteRetentionRequest.Builder().build(), + ExecuteRetentionRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.get_lifecycle /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. 
* * @see Documentation @@ -171,8 +221,8 @@ public GetLifecycleResponse getLifecycle(GetLifecycleRequest request) throws IOE } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @param fn * a function that initializes a builder to create the @@ -189,8 +239,8 @@ public final GetLifecycleResponse getLifecycle( } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see Documentation @@ -205,36 +255,105 @@ public GetLifecycleResponse getLifecycle() throws IOException, ElasticsearchExce // ----- Endpoint: slm.get_stats /** - * Returns global and policy-level statistics about actions taken by snapshot - * lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. + * + * @see Documentation + * on elastic.co + */ + + public GetStatsResponse getStats(GetStatsRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetStatsRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. 
+ * + * @param fn + * a function that initializes a builder to create the + * {@link GetStatsRequest} + * @see Documentation + * on elastic.co + */ + + public final GetStatsResponse getStats(Function> fn) + throws IOException, ElasticsearchException { + return getStats(fn.apply(new GetStatsRequest.Builder()).build()); + } + + /** + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. * * @see Documentation * on elastic.co */ + public GetStatsResponse getStats() throws IOException, ElasticsearchException { - return this.transport.performRequest(GetStatsRequest._INSTANCE, GetStatsRequest._ENDPOINT, + return this.transport.performRequest(new GetStatsRequest.Builder().build(), GetStatsRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.get_status /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. + * + * @see Documentation + * on elastic.co + */ + + public GetSlmStatusResponse getStatus(GetSlmStatusRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetSlmStatusRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Get the snapshot lifecycle management status. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetSlmStatusRequest} + * @see Documentation + * on elastic.co + */ + + public final GetSlmStatusResponse getStatus( + Function> fn) + throws IOException, ElasticsearchException { + return getStatus(fn.apply(new GetSlmStatusRequest.Builder()).build()); + } + + /** + * Get the snapshot lifecycle management status. 
* * @see Documentation * on elastic.co */ + public GetSlmStatusResponse getStatus() throws IOException, ElasticsearchException { - return this.transport.performRequest(GetSlmStatusRequest._INSTANCE, GetSlmStatusRequest._ENDPOINT, + return this.transport.performRequest(new GetSlmStatusRequest.Builder().build(), GetSlmStatusRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.put_lifecycle /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @see Documentation @@ -249,7 +368,9 @@ public PutLifecycleResponse putLifecycle(PutLifecycleRequest request) throws IOE } /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @param fn * a function that initializes a builder to create the @@ -268,28 +389,129 @@ public final PutLifecycleResponse putLifecycle( // ----- Endpoint: slm.start /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. + * + * @see Documentation + * on elastic.co + */ + + public StartSlmResponse start(StartSlmRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) StartSlmRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. 
Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. + * + * @param fn + * a function that initializes a builder to create the + * {@link StartSlmRequest} + * @see Documentation + * on elastic.co + */ + + public final StartSlmResponse start(Function> fn) + throws IOException, ElasticsearchException { + return start(fn.apply(new StartSlmRequest.Builder()).build()); + } + + /** + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. * * @see Documentation * on elastic.co */ + public StartSlmResponse start() throws IOException, ElasticsearchException { - return this.transport.performRequest(StartSlmRequest._INSTANCE, StartSlmRequest._ENDPOINT, + return this.transport.performRequest(new StartSlmRequest.Builder().build(), StartSlmRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: slm.stop /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. + * + * @see Documentation + * on elastic.co + */ + + public StopSlmResponse stop(StopSlmRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) StopSlmRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. + * + * @param fn + * a function that initializes a builder to create the + * {@link StopSlmRequest} + * @see Documentation + * on elastic.co + */ + + public final StopSlmResponse stop(Function> fn) + throws IOException, ElasticsearchException { + return stop(fn.apply(new StopSlmRequest.Builder()).build()); + } + + /** + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. * * @see Documentation * on elastic.co */ + public StopSlmResponse stop() throws IOException, ElasticsearchException { - return this.transport.performRequest(StopSlmRequest._INSTANCE, StopSlmRequest._ENDPOINT, this.transportOptions); + return this.transport.performRequest(new StopSlmRequest.Builder().build(), StopSlmRequest._ENDPOINT, + this.transportOptions); } } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java index fd0983d7e..2f4fb92d6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -31,7 +32,6 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.lang.String; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -56,21 +56,31 @@ // typedef: slm.execute_lifecycle.Request /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. 
The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @see API * specification */ public class ExecuteLifecycleRequest extends RequestBase { + @Nullable + private final Time masterTimeout; + private final String policyId; + @Nullable + private final Time timeout; + // --------------------------------------------------------------------------------------------- private ExecuteLifecycleRequest(Builder builder) { + this.masterTimeout = builder.masterTimeout; this.policyId = ApiTypeHelper.requireNonNull(builder.policyId, this, "policyId"); + this.timeout = builder.timeout; } @@ -78,6 +88,17 @@ public static ExecuteLifecycleRequest of(Function + * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * Required - The id of the snapshot lifecycle policy to be executed *

    @@ -87,6 +108,17 @@ public final String policyId() { return this.policyId; } + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -96,8 +128,35 @@ public final String policyId() { public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time masterTimeout; + private String policyId; + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * Required - The id of the snapshot lifecycle policy to be executed *

    @@ -108,6 +167,27 @@ public final Builder policyId(String value) { return this; } + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -178,7 +258,14 @@ public ExecuteLifecycleRequest build() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, ExecuteLifecycleResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java index 57ec5c733..396489e5d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -30,7 +31,11 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
@@ -50,21 +55,131 @@ // typedef: slm.execute_retention.Request /** - * Deletes any snapshots that are expired according to the policy's retention - * rules. + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. * * @see API * specification */ public class ExecuteRetentionRequest extends RequestBase { - public ExecuteRetentionRequest() { + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private ExecuteRetentionRequest(Builder builder) { + + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + + } + + public static ExecuteRetentionRequest of(Function> fn) { + return fn.apply(new Builder()).build(); } /** - * Singleton instance for {@link ExecuteRetentionRequest}. + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} */ - public static final ExecuteRetentionRequest _INSTANCE = new ExecuteRetentionRequest(); + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link ExecuteRetentionRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Time masterTimeout; + + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link ExecuteRetentionRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public ExecuteRetentionRequest build() { + _checkSingleUse(); + + return new ExecuteRetentionRequest(this); + } + } // --------------------------------------------------------------------------------------------- @@ -93,7 +208,14 @@ public ExecuteRetentionRequest() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, ExecuteRetentionResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java index 6df555883..b4c1871fa 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -31,7 +32,6 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.lang.String; -import java.util.Collections; import java.util.HashMap; import 
java.util.List; import java.util.Map; @@ -58,21 +58,29 @@ // typedef: slm.get_lifecycle.Request /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see API * specification */ public class GetLifecycleRequest extends RequestBase { + @Nullable + private final Time masterTimeout; + private final List policyId; + @Nullable + private final Time timeout; + // --------------------------------------------------------------------------------------------- private GetLifecycleRequest(Builder builder) { + this.masterTimeout = builder.masterTimeout; this.policyId = ApiTypeHelper.unmodifiable(builder.policyId); + this.timeout = builder.timeout; } @@ -80,6 +88,17 @@ public static GetLifecycleRequest of(Function + * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * Comma-separated list of snapshot lifecycle policies to retrieve *

    @@ -89,6 +108,17 @@ public final List policyId() { return this.policyId; } + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -98,9 +128,36 @@ public final List policyId() { public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time masterTimeout; + @Nullable private List policyId; + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * Comma-separated list of snapshot lifecycle policies to retrieve *

    @@ -125,6 +182,27 @@ public final Builder policyId(String value, String... values) { return this; } + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -205,7 +283,14 @@ public GetLifecycleRequest build() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, GetLifecycleResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java index d575fbbe7..7af2bf718 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -30,7 +31,11 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -50,20 +55,128 @@ // typedef: slm.get_status.Request /** - * Retrieves the status of snapshot lifecycle management (SLM). 
+ * Get the snapshot lifecycle management status. * * @see API * specification */ public class GetSlmStatusRequest extends RequestBase { - public GetSlmStatusRequest() { + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private GetSlmStatusRequest(Builder builder) { + + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + + } + + public static GetSlmStatusRequest of(Function> fn) { + return fn.apply(new Builder()).build(); } /** - * Singleton instance for {@link GetSlmStatusRequest}. + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} */ - public static final GetSlmStatusRequest _INSTANCE = new GetSlmStatusRequest(); + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetSlmStatusRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Time masterTimeout; + + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetSlmStatusRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetSlmStatusRequest build() { + _checkSingleUse(); + + return new GetSlmStatusRequest(this); + } + } // --------------------------------------------------------------------------------------------- @@ -92,7 +205,14 @@ public GetSlmStatusRequest() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, GetSlmStatusResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java index 9fd0ed656..e758a81f0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -30,7 +31,11 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; +import 
java.util.function.Function; +import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -50,21 +55,127 @@ // typedef: slm.get_stats.Request /** - * Returns global and policy-level statistics about actions taken by snapshot - * lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. * * @see API * specification */ public class GetStatsRequest extends RequestBase { - public GetStatsRequest() { + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private GetStatsRequest(Builder builder) { + + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + + } + + public static GetStatsRequest of(Function> fn) { + return fn.apply(new Builder()).build(); } /** - * Singleton instance for {@link GetStatsRequest}. + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} */ - public static final GetStatsRequest _INSTANCE = new GetStatsRequest(); + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link GetStatsRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time masterTimeout; + + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetStatsRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetStatsRequest build() { + _checkSingleUse(); + + return new GetStatsRequest(this); + } + } // --------------------------------------------------------------------------------------------- @@ -93,7 +204,14 @@ public GetStatsRequest() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, GetStatsResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java index ad68597ae..5f1803804 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java @@ -58,7 +58,9 @@ // typedef: slm.put_lifecycle.Request /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @see API * specification @@ -141,7 +143,8 @@ public final String name() { } /** - * Required - ID for the snapshot lifecycle policy you want to create or update. + * Required - The identifier for the snapshot lifecycle policy you want to + * create or update. *

    * API name: {@code policy_id} */ @@ -317,7 +320,8 @@ public final Builder name(@Nullable String value) { } /** - * Required - ID for the snapshot lifecycle policy you want to create or update. + * Required - The identifier for the snapshot lifecycle policy you want to + * create or update. *

    * API name: {@code policy_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java index 2be9ce3f7..d3d8c0f46 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -30,7 +31,11 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -50,20 +55,128 @@ // typedef: slm.start.Request /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. 
* * @see API * specification */ public class StartSlmRequest extends RequestBase { - public StartSlmRequest() { + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private StartSlmRequest(Builder builder) { + + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + + } + + public static StartSlmRequest of(Function> fn) { + return fn.apply(new Builder()).build(); } /** - * Singleton instance for {@link StartSlmRequest}. + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} */ - public static final StartSlmRequest _INSTANCE = new StartSlmRequest(); + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link StartSlmRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time masterTimeout; + + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link StartSlmRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public StartSlmRequest build() { + _checkSingleUse(); + + return new StartSlmRequest(this); + } + } // --------------------------------------------------------------------------------------------- @@ -92,7 +205,14 @@ public StartSlmRequest() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, StartSlmResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java index bc7a32f73..249aabbeb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -30,7 +31,11 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; +import java.util.function.Function; +import 
javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -50,20 +55,136 @@ // typedef: slm.stop.Request /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

    + * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. * * @see API * specification */ public class StopSlmRequest extends RequestBase { - public StopSlmRequest() { + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private StopSlmRequest(Builder builder) { + + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + + } + + public static StopSlmRequest of(Function> fn) { + return fn.apply(new Builder()).build(); } /** - * Singleton instance for {@link StopSlmRequest}. + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} */ - public static final StopSlmRequest _INSTANCE = new StopSlmRequest(); + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link StopSlmRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time masterTimeout; + + @Nullable + private Time timeout; + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

    + * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link StopSlmRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public StopSlmRequest build() { + _checkSingleUse(); + + return new StopSlmRequest(this); + } + } // --------------------------------------------------------------------------------------------- @@ -92,7 +213,14 @@ public StopSlmRequest() { // Request parameters request -> { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; }, SimpleEndpoint.emptyMap(), false, StopSlmResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java index 5626be8bd..759d3157a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java @@ -56,8 +56,9 @@ // typedef: snapshot.cleanup_repository.Request /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java index d23514cb8..ab5a8ea0b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java @@ -58,8 +58,8 @@ // typedef: snapshot.clone.Request /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part or all of a snapshot into another snapshot in + * the same repository. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java index 0bb69501c..86eeca000 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java @@ -60,7 +60,13 @@ // typedef: snapshot.create_repository.Request /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * cluster.blocks.read_only_allow_delete settings) that prevent + * write access.
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java index 5a415b198..fc6be7a48 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java @@ -61,7 +61,8 @@ // typedef: snapshot.create.Request /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java index c8f2587ea..7521c6549 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java @@ -58,7 +58,10 @@ // typedef: snapshot.delete_repository.Request /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java index fc29ad635..cfd12fe6b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java @@ -56,7 +56,7 @@ // typedef: snapshot.delete.Request /** - * Deletes one or more snapshots. + * Delete snapshots. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java index 0b5bdfaca..c48c2e601 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java @@ -70,11 +70,12 @@ public ElasticsearchSnapshotAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: snapshot.cleanup_repository /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -86,14 +87,15 @@ public CompletableFuture cleanupRepository(CleanupRep } /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. 
Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @param fn * a function that initializes a builder to create the * {@link CleanupRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -105,11 +107,11 @@ public final CompletableFuture cleanupRepository( // ----- Endpoint: snapshot.clone /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part of all of a snapshot into another snapshot in + * the same repository. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html">Documentation * on elastic.co */ @@ -121,14 +123,14 @@ public CompletableFuture clone(CloneSnapshotRequest reque } /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part of all of a snapshot into another snapshot in + * the same repository. * * @param fn * a function that initializes a builder to create the * {@link CloneSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html">Documentation * on elastic.co */ @@ -140,10 +142,11 @@ public final CompletableFuture clone( // ----- Endpoint: snapshot.create /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html">Documentation * on elastic.co */ @@ -155,13 +158,14 @@ public CompletableFuture create(CreateSnapshotRequest re } /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. 
* * @param fn * a function that initializes a builder to create the * {@link CreateSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html">Documentation * on elastic.co */ @@ -173,7 +177,13 @@ public final CompletableFuture create( // ----- Endpoint: snapshot.create_repository /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @see Documentation @@ -188,7 +198,13 @@ public CompletableFuture createRepository(CreateReposi } /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @param fn * a function that initializes a builder to create the @@ -206,10 +222,10 @@ public final CompletableFuture createRepository( // ----- Endpoint: snapshot.delete /** - * Deletes one or more snapshots. + * Delete snapshots. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html">Documentation * on elastic.co */ @@ -221,13 +237,13 @@ public CompletableFuture delete(DeleteSnapshotRequest re } /** - * Deletes one or more snapshots. + * Delete snapshots. 
* * @param fn * a function that initializes a builder to create the * {@link DeleteSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html">Documentation * on elastic.co */ @@ -239,10 +255,13 @@ public final CompletableFuture delete( // ----- Endpoint: snapshot.delete_repository /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -254,13 +273,16 @@ public CompletableFuture deleteRepository(DeleteReposi } /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @param fn * a function that initializes a builder to create the * {@link DeleteRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -272,10 +294,10 @@ public final CompletableFuture deleteRepository( // ----- Endpoint: snapshot.get /** - * Returns information about a snapshot. + * Get snapshot information. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html">Documentation * on elastic.co */ @@ -287,13 +309,13 @@ public CompletableFuture get(GetSnapshotRequest request) { } /** - * Returns information about a snapshot. + * Get snapshot information. 
* * @param fn * a function that initializes a builder to create the * {@link GetSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html">Documentation * on elastic.co */ @@ -305,10 +327,10 @@ public final CompletableFuture get( // ----- Endpoint: snapshot.get_repository /** - * Returns information about a repository. + * Get snapshot repository information. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -320,13 +342,13 @@ public CompletableFuture getRepository(GetRepositoryReque } /** - * Returns information about a repository. + * Get snapshot repository information. * * @param fn * a function that initializes a builder to create the * {@link GetRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -336,10 +358,10 @@ public final CompletableFuture getRepository( } /** - * Returns information about a repository. + * Get snapshot repository information. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -351,10 +373,59 @@ public CompletableFuture getRepository() { // ----- Endpoint: snapshot.repository_verify_integrity /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

    + * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

    + * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

      + *
    • It may not be possible to restore some snapshots from this + * repository.
    • + *
    • Searchable snapshots may report errors when searched or may have + * unassigned shards.
    • + *
    • Taking snapshots into this repository may fail or may appear to succeed + * but have created a snapshot which cannot be restored.
    • + *
    • Deleting snapshots from this repository may fail or may appear to succeed + * but leave the underlying data on disk.
    • + *
    • Continuing to write to the repository while it is in an invalid state may + * cause additional damage to its contents.
    • + *
    + *

    + * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

    + * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

    + * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

    + * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

    + * NOTE: This API may not work correctly in a mixed-version cluster. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html">Documentation * on elastic.co */ @@ -367,13 +438,62 @@ public CompletableFuture repositoryVerifyInte } /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

    + * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

    + * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

      + *
    • It may not be possible to restore some snapshots from this + * repository.
    • + *
    • Searchable snapshots may report errors when searched or may have + * unassigned shards.
    • + *
    • Taking snapshots into this repository may fail or may appear to succeed + * but have created a snapshot which cannot be restored.
    • + *
    • Deleting snapshots from this repository may fail or may appear to succeed + * but leave the underlying data on disk.
    • + *
    • Continuing to write to the repository while it is in an invalid state may + * cause additional damage to its contents.
    • + *
    + *

    + * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

    + * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

    + * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

    + * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

    + * NOTE: This API may not work correctly in a mixed-version cluster. * * @param fn * a function that initializes a builder to create the * {@link RepositoryVerifyIntegrityRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html">Documentation * on elastic.co */ @@ -385,10 +505,35 @@ public final CompletableFuture repositoryVeri // ----- Endpoint: snapshot.restore /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

    + * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

    + * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

    + * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

    +	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
    +	 * 
    +	 * 
    + *

    + * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

    + * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html">Documentation * on elastic.co */ @@ -400,13 +545,38 @@ public CompletableFuture restore(RestoreRequest request) { } /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

    + * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

    + * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

    + * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

    +	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
    +	 * 
    +	 * 
    + *

    + * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

    + * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @param fn * a function that initializes a builder to create the * {@link RestoreRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html">Documentation * on elastic.co */ @@ -418,10 +588,24 @@ public final CompletableFuture restore( // ----- Endpoint: snapshot.status /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

    + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html">Documentation * on elastic.co */ @@ -433,13 +617,27 @@ public CompletableFuture status(SnapshotStatusRequest re } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

    + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @param fn * a function that initializes a builder to create the * {@link SnapshotStatusRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html">Documentation * on elastic.co */ @@ -449,10 +647,24 @@ public final CompletableFuture status( } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

    + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html">Documentation * on elastic.co */ @@ -464,10 +676,11 @@ public CompletableFuture status() { // ----- Endpoint: snapshot.verify_repository /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -479,13 +692,14 @@ public CompletableFuture verifyRepository(VerifyReposi } /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @param fn * a function that initializes a builder to create the * {@link VerifyRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java index 8e0b2dee8..be3d00434 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java @@ -68,11 +68,12 @@ public ElasticsearchSnapshotClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: snapshot.cleanup_repository /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. 
+ * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -85,14 +86,15 @@ public CleanupRepositoryResponse cleanupRepository(CleanupRepositoryRequest requ } /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @param fn * a function that initializes a builder to create the * {@link CleanupRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -105,11 +107,11 @@ public final CleanupRepositoryResponse cleanupRepository( // ----- Endpoint: snapshot.clone /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part of all of a snapshot into another snapshot in + * the same repository. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html">Documentation * on elastic.co */ @@ -121,14 +123,14 @@ public CloneSnapshotResponse clone(CloneSnapshotRequest request) throws IOExcept } /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part of all of a snapshot into another snapshot in + * the same repository. 
* * @param fn * a function that initializes a builder to create the * {@link CloneSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html">Documentation * on elastic.co */ @@ -141,10 +143,11 @@ public final CloneSnapshotResponse clone( // ----- Endpoint: snapshot.create /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html">Documentation * on elastic.co */ @@ -156,13 +159,14 @@ public CreateSnapshotResponse create(CreateSnapshotRequest request) throws IOExc } /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @param fn * a function that initializes a builder to create the * {@link CreateSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html">Documentation * on elastic.co */ @@ -175,7 +179,13 @@ public final CreateSnapshotResponse create( // ----- Endpoint: snapshot.create_repository /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @see Documentation @@ -191,7 +201,13 @@ public CreateRepositoryResponse createRepository(CreateRepositoryRequest request } /** - * Creates a repository. + * Create or update a snapshot repository. 
IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @param fn * a function that initializes a builder to create the @@ -210,10 +226,10 @@ public final CreateRepositoryResponse createRepository( // ----- Endpoint: snapshot.delete /** - * Deletes one or more snapshots. + * Delete snapshots. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html">Documentation * on elastic.co */ @@ -225,13 +241,13 @@ public DeleteSnapshotResponse delete(DeleteSnapshotRequest request) throws IOExc } /** - * Deletes one or more snapshots. + * Delete snapshots. * * @param fn * a function that initializes a builder to create the * {@link DeleteSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html">Documentation * on elastic.co */ @@ -244,10 +260,13 @@ public final DeleteSnapshotResponse delete( // ----- Endpoint: snapshot.delete_repository /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -260,13 +279,16 @@ public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest request } /** - * Deletes a repository. + * Delete snapshot repositories. 
When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @param fn * a function that initializes a builder to create the * {@link DeleteRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -279,10 +301,10 @@ public final DeleteRepositoryResponse deleteRepository( // ----- Endpoint: snapshot.get /** - * Returns information about a snapshot. + * Get snapshot information. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html">Documentation * on elastic.co */ @@ -294,13 +316,13 @@ public GetSnapshotResponse get(GetSnapshotRequest request) throws IOException, E } /** - * Returns information about a snapshot. + * Get snapshot information. * * @param fn * a function that initializes a builder to create the * {@link GetSnapshotRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html">Documentation * on elastic.co */ @@ -312,10 +334,10 @@ public final GetSnapshotResponse get(FunctionDocumentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -328,13 +350,13 @@ public GetRepositoryResponse getRepository(GetRepositoryRequest request) } /** - * Returns information about a repository. + * Get snapshot repository information. * * @param fn * a function that initializes a builder to create the * {@link GetRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -345,10 +367,10 @@ public final GetRepositoryResponse getRepository( } /** - * Returns information about a repository. 
+ * Get snapshot repository information. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -360,10 +382,59 @@ public GetRepositoryResponse getRepository() throws IOException, ElasticsearchEx // ----- Endpoint: snapshot.repository_verify_integrity /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

    + * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

    + * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

      + *
    • It may not be possible to restore some snapshots from this + * repository.
    • + *
    • Searchable snapshots may report errors when searched or may have + * unassigned shards.
    • + *
    • Taking snapshots into this repository may fail or may appear to succeed + * but have created a snapshot which cannot be restored.
    • + *
    • Deleting snapshots from this repository may fail or may appear to succeed + * but leave the underlying data on disk.
    • + *
    • Continuing to write to the repository while it is in an invalid state may + * cause additional damage to its contents.
    • + *
    + *

    + * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

    + * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

    + * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

    + * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

    + * NOTE: This API may not work correctly in a mixed-version cluster. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html">Documentation * on elastic.co */ @@ -376,13 +447,62 @@ public RepositoryVerifyIntegrityResponse repositoryVerifyIntegrity(RepositoryVer } /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

    + * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

    + * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

      + *
    • It may not be possible to restore some snapshots from this + * repository.
    • + *
    • Searchable snapshots may report errors when searched or may have + * unassigned shards.
    • + *
    • Taking snapshots into this repository may fail or may appear to succeed + * but have created a snapshot which cannot be restored.
    • + *
    • Deleting snapshots from this repository may fail or may appear to succeed + * but leave the underlying data on disk.
    • + *
    • Continuing to write to the repository while it is in an invalid state may + * cause additional damage to its contents.
    • + *
    + *

    + * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

    + * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

    + * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

    + * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

    + * NOTE: This API may not work correctly in a mixed-version cluster. * * @param fn * a function that initializes a builder to create the * {@link RepositoryVerifyIntegrityRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html">Documentation * on elastic.co */ @@ -395,10 +515,35 @@ public final RepositoryVerifyIntegrityResponse repositoryVerifyIntegrity( // ----- Endpoint: snapshot.restore /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

    + * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

    + * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

    + * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

    +	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
    +	 * 
    +	 * 
    + *

    + * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

    + * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html">Documentation * on elastic.co */ @@ -410,13 +555,38 @@ public RestoreResponse restore(RestoreRequest request) throws IOException, Elast } /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

    + * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

    + * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

    + * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

    +	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
    +	 * 
    +	 * 
    + *

    + * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

    + * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @param fn * a function that initializes a builder to create the * {@link RestoreRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html">Documentation * on elastic.co */ @@ -428,10 +598,24 @@ public final RestoreResponse restore(Function + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html">Documentation * on elastic.co */ @@ -443,13 +627,27 @@ public SnapshotStatusResponse status(SnapshotStatusRequest request) throws IOExc } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

    + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @param fn * a function that initializes a builder to create the * {@link SnapshotStatusRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html">Documentation * on elastic.co */ @@ -460,10 +658,24 @@ public final SnapshotStatusResponse status( } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

    + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html">Documentation * on elastic.co */ @@ -475,10 +687,11 @@ public SnapshotStatusResponse status() throws IOException, ElasticsearchExceptio // ----- Endpoint: snapshot.verify_repository /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html">Documentation * on elastic.co */ @@ -491,13 +704,14 @@ public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest request } /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @param fn * a function that initializes a builder to create the * {@link VerifyRepositoryRequest} * @see Documentation + * "https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html">Documentation * on elastic.co */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java index 55a08eeba..305661c71 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java @@ -59,7 +59,7 @@ // typedef: snapshot.get_repository.Request /** - * Returns information about a repository. + * Get snapshot repository information. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java index 90544d289..b62955a32 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java @@ -61,7 +61,7 @@ // typedef: snapshot.get.Request /** - * Returns information about a snapshot. + * Get snapshot information. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java index a71c3ea66..632463a4e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java @@ -59,7 +59,56 @@ // typedef: snapshot.repository_verify_integrity.Request /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

    + * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

    + * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

      + *
    • It may not be possible to restore some snapshots from this + * repository.
    • + *
    • Searchable snapshots may report errors when searched or may have + * unassigned shards.
    • + *
    • Taking snapshots into this repository may fail or may appear to succeed + * but have created a snapshot which cannot be restored.
    • + *
    • Deleting snapshots from this repository may fail or may appear to succeed + * but leave the underlying data on disk.
    • + *
    • Continuing to write to the repository while it is in an invalid state may + * cause additional damage to its contents.
    • + *
    + *

    + * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

    + * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

    + * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

    + * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

    + * NOTE: This API may not work correctly in a mixed-version cluster. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java index 1176d6dac..509e7f8a9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java @@ -61,7 +61,32 @@ // typedef: snapshot.restore.Request /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

    + * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

    + * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

    + * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

    + * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
    + * 
    + * 
    + *

    + * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

    + * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java index c9da0634a..45641441c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java @@ -59,7 +59,21 @@ // typedef: snapshot.status.Request /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

    + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

    + * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java index fffc7bc18..2371c4f0f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java @@ -56,7 +56,8 @@ // typedef: snapshot.verify_repository.Request /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/CancelRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/CancelRequest.java index f9d5ba574..565f08bdd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/CancelRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/CancelRequest.java @@ -58,7 +58,19 @@ // typedef: tasks.cancel.Request /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been + * cancelled because it may not be able to safely stop its current activity + * straight away. It is also possible that Elasticsearch must complete its work + * on other tasks before it can process the cancellation. The get task + * information API will continue to list these cancelled tasks until they + * complete. The cancelled flag in the response indicates that the cancellation + * command has been processed and the task will stop as soon as possible. + *

    + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksAsyncClient.java index c7739d9f1..ff8866d15 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksAsyncClient.java @@ -68,7 +68,19 @@ public ElasticsearchTasksAsyncClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: tasks.cancel /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been + * cancelled because it may not be able to safely stop its current activity + * straight away. It is also possible that Elasticsearch must complete its work + * on other tasks before it can process the cancellation. The get task + * information API will continue to list these cancelled tasks until they + * complete. The cancelled flag in the response indicates that the cancellation + * command has been processed and the task will stop as soon as possible. + *

    + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @see Documentation @@ -83,7 +95,19 @@ public CompletableFuture cancel(CancelRequest request) { } /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been + * cancelled because it may not be able to safely stop its current activity + * straight away. It is also possible that Elasticsearch must complete its work + * on other tasks before it can process the cancellation. The get task + * information API will continue to list these cancelled tasks until they + * complete. The cancelled flag in the response indicates that the cancellation + * command has been processed and the task will stop as soon as possible. + *

    + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @param fn * a function that initializes a builder to create the @@ -99,7 +123,19 @@ public final CompletableFuture cancel( } /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been + * cancelled because it may not be able to safely stop its current activity + * straight away. It is also possible that Elasticsearch must complete its work + * on other tasks before it can process the cancellation. The get task + * information API will continue to list these cancelled tasks until they + * complete. The cancelled flag in the response indicates that the cancellation + * command has been processed and the task will stop as soon as possible. + *

    + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @see Documentation @@ -114,8 +150,8 @@ public CompletableFuture cancel() { // ----- Endpoint: tasks.get /** - * Get task information. Returns information about the tasks currently executing - * in the cluster. + * Get task information. Get information about a task currently running in the + * cluster. * * @see Documentation @@ -130,8 +166,8 @@ public CompletableFuture get(GetTasksRequest request) { } /** - * Get task information. Returns information about the tasks currently executing - * in the cluster. + * Get task information. Get information about a task currently running in the + * cluster. * * @param fn * a function that initializes a builder to create the @@ -149,8 +185,8 @@ public final CompletableFuture get( // ----- Endpoint: tasks.list /** - * The task management API returns information about tasks currently executing - * on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or + * more nodes in the cluster. * * @see Documentation @@ -165,8 +201,8 @@ public CompletableFuture list(ListRequest request) { } /** - * The task management API returns information about tasks currently executing - * on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or + * more nodes in the cluster. 
* * @param fn * a function that initializes a builder to create the @@ -181,8 +217,8 @@ public final CompletableFuture list(FunctionDocumentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksClient.java index 71c3bb26f..4bee64783 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ElasticsearchTasksClient.java @@ -68,7 +68,19 @@ public ElasticsearchTasksClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: tasks.cancel /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been + * cancelled because it may not be able to safely stop its current activity + * straight away. It is also possible that Elasticsearch must complete its work + * on other tasks before it can process the cancellation. The get task + * information API will continue to list these cancelled tasks until they + * complete. The cancelled flag in the response indicates that the cancellation + * command has been processed and the task will stop as soon as possible. + *

    + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @see Documentation @@ -83,7 +95,19 @@ public CancelResponse cancel(CancelRequest request) throws IOException, Elastics } /** - * Cancels a task, if it can be cancelled through an API. + * Cancel a task. A task may continue to run for some time after it has been + * cancelled because it may not be able to safely stop its current activity + * straight away. It is also possible that Elasticsearch must complete its work + * on other tasks before it can process the cancellation. The get task + * information API will continue to list these cancelled tasks until they + * complete. The cancelled flag in the response indicates that the cancellation + * command has been processed and the task will stop as soon as possible. + *

    + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @param fn * a function that initializes a builder to create the @@ -99,7 +123,19 @@ public final CancelResponse cancel(Function + * To troubleshoot why a cancelled task does not complete promptly, use the get + * task information API with the ?detailed parameter to identify + * the other tasks the system is running. You can also use the node hot threads + * API to obtain detailed information about the work the system is doing instead + * of completing the cancelled task. * * @see Documentation @@ -114,8 +150,8 @@ public CancelResponse cancel() throws IOException, ElasticsearchException { // ----- Endpoint: tasks.get /** - * Get task information. Returns information about the tasks currently executing - * in the cluster. + * Get task information. Get information about a task currently running in the + * cluster. * * @see Documentation @@ -130,8 +166,8 @@ public GetTasksResponse get(GetTasksRequest request) throws IOException, Elastic } /** - * Get task information. Returns information about the tasks currently executing - * in the cluster. + * Get task information. Get information about a task currently running in the + * cluster. * * @param fn * a function that initializes a builder to create the @@ -149,8 +185,8 @@ public final GetTasksResponse get(FunctionDocumentation @@ -165,8 +201,8 @@ public ListResponse list(ListRequest request) throws IOException, ElasticsearchE } /** - * The task management API returns information about tasks currently executing - * on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or + * more nodes in the cluster. 
* * @param fn * a function that initializes a builder to create the @@ -182,8 +218,8 @@ public final ListResponse list(FunctionDocumentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/GetTasksRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/GetTasksRequest.java index ea9563e26..36c03dab6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/GetTasksRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/GetTasksRequest.java @@ -57,8 +57,8 @@ // typedef: tasks.get.Request /** - * Get task information. Returns information about the tasks currently executing - * in the cluster. + * Get task information. Get information about a task currently running in the + * cluster. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ListRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ListRequest.java index c212aa37b..7ca029327 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ListRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/ListRequest.java @@ -60,8 +60,8 @@ // typedef: tasks.list.Request /** - * The task management API returns information about tasks currently executing - * on one or more nodes in the cluster. + * Get all tasks. Get information about the tasks currently running on one or + * more nodes in the cluster. * * @see API * specification @@ -121,7 +121,8 @@ public final List actions() { /** * If true, the response includes detailed information about shard - * recoveries. + * recoveries. This information is useful to distinguish tasks from each other + * but is more costly to run. *

    * API name: {@code detailed} */ @@ -251,7 +252,8 @@ public final Builder actions(String value, String... values) { /** * If true, the response includes detailed information about shard - * recoveries. + * recoveries. This information is useful to distinguish tasks from each other + * but is more costly to run. *

    * API name: {@code detailed} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/TaskInfo.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/TaskInfo.java index 646858885..2494bdfcd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/TaskInfo.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/tasks/TaskInfo.java @@ -142,6 +142,15 @@ public final boolean cancellable() { } /** + * Human readable text that identifies the particular request that the task is + * performing. For example, it might identify the search request being performed + * by a search task. Other kinds of tasks have different descriptions, like + * _reindex which has the source and the destination, or + * _bulk which just has the number of requests and the destination + * indices. Many requests will have only an empty description because more + * detailed information about the request is not easily available or + * particularly helpful in identifying the request. + *

    * API name: {@code description} */ @Nullable @@ -193,7 +202,12 @@ public final long startTimeInMillis() { } /** - * Task status information can vary wildly from task to task. + * The internal status of the task, which varies from task to task. The format + * also varies. While the goal is to keep the status for a particular task + * consistent from version to version, this is not always possible because + * sometimes the implementation changes. Fields might be removed from the status + * for a particular request so any parsing you do of the status might break in + * minor releases. *

    * API name: {@code status} */ @@ -377,6 +391,15 @@ public final BuilderT cancellable(boolean value) { } /** + * Human readable text that identifies the particular request that the task is + * performing. For example, it might identify the search request being performed + * by a search task. Other kinds of tasks have different descriptions, like + * _reindex which has the source and the destination, or + * _bulk which just has the number of requests and the destination + * indices. Many requests will have only an empty description because more + * detailed information about the request is not easily available or + * particularly helpful in identifying the request. + *

    * API name: {@code description} */ public final BuilderT description(@Nullable String value) { @@ -452,7 +475,12 @@ public final BuilderT startTimeInMillis(long value) { } /** - * Task status information can vary wildly from task to task. + * The internal status of the task, which varies from task to task. The format + * also varies. While the goal is to keep the status for a particular task + * consistent from version to version, this is not always possible because + * sometimes the implementation changes. Fields might be removed from the status + * for a particular request so any parsing you do of the status might break in + * minor releases. *

    * API name: {@code status} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/EcsCompatibilityType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/EcsCompatibilityType.java new file mode 100644 index 000000000..7fa5f5e8f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/EcsCompatibilityType.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum EcsCompatibilityType implements JsonEnum { + Disabled("disabled"), + + V1("v1"), + + ; + + private final String jsonValue; + + EcsCompatibilityType(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + EcsCompatibilityType.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureAsyncClient.java index 023a616f6..441661964 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureAsyncClient.java @@ -67,10 +67,122 @@ public ElasticsearchTextStructureAsyncClient withTransportOptions(@Nullable Tran return new ElasticsearchTextStructureAsyncClient(this.transport, transportOptions); } + // ----- Endpoint: text_structure.find_field_structure + + /** + * Find the structure of a text field. Find the structure of a text field in an + * Elasticsearch index. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture findFieldStructure(FindFieldStructureRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) FindFieldStructureRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Find the structure of a text field. 
Find the structure of a text field in an + * Elasticsearch index. + * + * @param fn + * a function that initializes a builder to create the + * {@link FindFieldStructureRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture findFieldStructure( + Function> fn) { + return findFieldStructure(fn.apply(new FindFieldStructureRequest.Builder()).build()); + } + + // ----- Endpoint: text_structure.find_message_structure + + /** + * Find the structure of text messages. Find the structure of a list of text + * messages. The messages must contain data that is suitable to be ingested into + * Elasticsearch. + *

    + * This API provides a starting point for ingesting data into Elasticsearch in a + * format that is suitable for subsequent use with other Elastic Stack + * functionality. Use this API rather than the find text structure API if your + * input text has already been split up into separate messages by some other + * process. The response from the API contains: + *

+ * <ul>
+ * <li>Sample messages.</li>
+ * <li>Statistics that reveal the most common values for all fields detected
+ * within the text and basic numeric statistics for numeric fields.</li>
+ * <li>Information about the structure of the text, which is useful when you
+ * write ingest configurations to index it or similarly formatted text.
+ * Appropriate mappings for an Elasticsearch index, which you could use to
+ * ingest the text.</li>
+ * </ul>
+ *

    + * All this information can be calculated by the structure finder with no + * guidance. However, you can optionally override some of the decisions about + * the text structure by specifying one or more query parameters. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture findMessageStructure(FindMessageStructureRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) FindMessageStructureRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Find the structure of text messages. Find the structure of a list of text + * messages. The messages must contain data that is suitable to be ingested into + * Elasticsearch. + *

    + * This API provides a starting point for ingesting data into Elasticsearch in a + * format that is suitable for subsequent use with other Elastic Stack + * functionality. Use this API rather than the find text structure API if your + * input text has already been split up into separate messages by some other + * process. The response from the API contains: + *

+ * <ul>
+ * <li>Sample messages.</li>
+ * <li>Statistics that reveal the most common values for all fields detected
+ * within the text and basic numeric statistics for numeric fields.</li>
+ * <li>Information about the structure of the text, which is useful when you
+ * write ingest configurations to index it or similarly formatted text.
+ * Appropriate mappings for an Elasticsearch index, which you could use to
+ * ingest the text.</li>
+ * </ul>
+ *

    + * All this information can be calculated by the structure finder with no + * guidance. However, you can optionally override some of the decisions about + * the text structure by specifying one or more query parameters. + * + * @param fn + * a function that initializes a builder to create the + * {@link FindMessageStructureRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture findMessageStructure( + Function> fn) { + return findMessageStructure(fn.apply(new FindMessageStructureRequest.Builder()).build()); + } + // ----- Endpoint: text_structure.test_grok_pattern /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The + * API indicates whether the lines match the pattern together with the offsets + * and lengths of the matched substrings. * * @see Documentation @@ -85,7 +197,9 @@ public CompletableFuture testGrokPattern(TestGrokPatter } /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The + * API indicates whether the lines match the pattern together with the offsets + * and lengths of the matched substrings. 
* * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureClient.java index 6fabfaaaa..9fa08f286 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/ElasticsearchTextStructureClient.java @@ -68,10 +68,126 @@ public ElasticsearchTextStructureClient withTransportOptions(@Nullable Transport return new ElasticsearchTextStructureClient(this.transport, transportOptions); } + // ----- Endpoint: text_structure.find_field_structure + + /** + * Find the structure of a text field. Find the structure of a text field in an + * Elasticsearch index. + * + * @see Documentation + * on elastic.co + */ + + public FindFieldStructureResponse findFieldStructure(FindFieldStructureRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) FindFieldStructureRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Find the structure of a text field. Find the structure of a text field in an + * Elasticsearch index. + * + * @param fn + * a function that initializes a builder to create the + * {@link FindFieldStructureRequest} + * @see Documentation + * on elastic.co + */ + + public final FindFieldStructureResponse findFieldStructure( + Function> fn) + throws IOException, ElasticsearchException { + return findFieldStructure(fn.apply(new FindFieldStructureRequest.Builder()).build()); + } + + // ----- Endpoint: text_structure.find_message_structure + + /** + * Find the structure of text messages. Find the structure of a list of text + * messages. 
The messages must contain data that is suitable to be ingested into + * Elasticsearch. + *

    + * This API provides a starting point for ingesting data into Elasticsearch in a + * format that is suitable for subsequent use with other Elastic Stack + * functionality. Use this API rather than the find text structure API if your + * input text has already been split up into separate messages by some other + * process. The response from the API contains: + *

+ * <ul>
+ * <li>Sample messages.</li>
+ * <li>Statistics that reveal the most common values for all fields detected
+ * within the text and basic numeric statistics for numeric fields.</li>
+ * <li>Information about the structure of the text, which is useful when you
+ * write ingest configurations to index it or similarly formatted text.
+ * Appropriate mappings for an Elasticsearch index, which you could use to
+ * ingest the text.</li>
+ * </ul>
+ *

    + * All this information can be calculated by the structure finder with no + * guidance. However, you can optionally override some of the decisions about + * the text structure by specifying one or more query parameters. + * + * @see Documentation + * on elastic.co + */ + + public FindMessageStructureResponse findMessageStructure(FindMessageStructureRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) FindMessageStructureRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Find the structure of text messages. Find the structure of a list of text + * messages. The messages must contain data that is suitable to be ingested into + * Elasticsearch. + *

    + * This API provides a starting point for ingesting data into Elasticsearch in a + * format that is suitable for subsequent use with other Elastic Stack + * functionality. Use this API rather than the find text structure API if your + * input text has already been split up into separate messages by some other + * process. The response from the API contains: + *

+ * <ul>
+ * <li>Sample messages.</li>
+ * <li>Statistics that reveal the most common values for all fields detected
+ * within the text and basic numeric statistics for numeric fields.</li>
+ * <li>Information about the structure of the text, which is useful when you
+ * write ingest configurations to index it or similarly formatted text.
+ * Appropriate mappings for an Elasticsearch index, which you could use to
+ * ingest the text.</li>
+ * </ul>
+ *

    + * All this information can be calculated by the structure finder with no + * guidance. However, you can optionally override some of the decisions about + * the text structure by specifying one or more query parameters. + * + * @param fn + * a function that initializes a builder to create the + * {@link FindMessageStructureRequest} + * @see Documentation + * on elastic.co + */ + + public final FindMessageStructureResponse findMessageStructure( + Function> fn) + throws IOException, ElasticsearchException { + return findMessageStructure(fn.apply(new FindMessageStructureRequest.Builder()).build()); + } + // ----- Endpoint: text_structure.test_grok_pattern /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The + * API indicates whether the lines match the pattern together with the offsets + * and lengths of the matched substrings. * * @see Documentation @@ -87,7 +203,9 @@ public TestGrokPatternResponse testGrokPattern(TestGrokPatternRequest request) } /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The + * API indicates whether the lines match the pattern together with the offsets + * and lengths of the matched substrings. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FieldStat.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FieldStat.java new file mode 100644 index 000000000..e0bd56c0d --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FieldStat.java @@ -0,0 +1,406 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: text_structure._types.FieldStat + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class FieldStat implements JsonpSerializable { + private final int count; + + private final int cardinality; + + private final List topHits; + + @Nullable + private final Integer meanValue; + + @Nullable + private final Integer medianValue; + + @Nullable + private final Integer maxValue; + + @Nullable + private final Integer minValue; + + @Nullable + private final String earliest; + + @Nullable + private final String latest; + + // --------------------------------------------------------------------------------------------- + + private FieldStat(Builder builder) { + + this.count = ApiTypeHelper.requireNonNull(builder.count, this, "count"); + this.cardinality = ApiTypeHelper.requireNonNull(builder.cardinality, this, "cardinality"); + this.topHits = ApiTypeHelper.unmodifiableRequired(builder.topHits, this, "topHits"); + this.meanValue = builder.meanValue; + this.medianValue = builder.medianValue; + this.maxValue = builder.maxValue; + this.minValue = builder.minValue; + this.earliest = builder.earliest; + this.latest = builder.latest; + + } + + public static FieldStat of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code count} + */ + public final int count() { + return this.count; + } + + /** + * Required - API name: {@code cardinality} + */ + public final int cardinality() { + return this.cardinality; + } + + /** + * Required - API name: {@code top_hits} + */ + public final List topHits() { + return this.topHits; + } + + /** + * API name: {@code mean_value} + */ + @Nullable + public final Integer meanValue() { + return this.meanValue; + } + + /** + * API name: {@code median_value} + */ + @Nullable + public final Integer medianValue() { + return this.medianValue; + } + + /** + * API name: {@code max_value} + */ + @Nullable + 
public final Integer maxValue() { + return this.maxValue; + } + + /** + * API name: {@code min_value} + */ + @Nullable + public final Integer minValue() { + return this.minValue; + } + + /** + * API name: {@code earliest} + */ + @Nullable + public final String earliest() { + return this.earliest; + } + + /** + * API name: {@code latest} + */ + @Nullable + public final String latest() { + return this.latest; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("count"); + generator.write(this.count); + + generator.writeKey("cardinality"); + generator.write(this.cardinality); + + if (ApiTypeHelper.isDefined(this.topHits)) { + generator.writeKey("top_hits"); + generator.writeStartArray(); + for (TopHit item0 : this.topHits) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (this.meanValue != null) { + generator.writeKey("mean_value"); + generator.write(this.meanValue); + + } + if (this.medianValue != null) { + generator.writeKey("median_value"); + generator.write(this.medianValue); + + } + if (this.maxValue != null) { + generator.writeKey("max_value"); + generator.write(this.maxValue); + + } + if (this.minValue != null) { + generator.writeKey("min_value"); + generator.write(this.minValue); + + } + if (this.earliest != null) { + generator.writeKey("earliest"); + generator.write(this.earliest); + + } + if (this.latest != null) { + generator.writeKey("latest"); + generator.write(this.latest); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FieldStat}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private Integer count; + + private Integer cardinality; + + private List topHits; + + @Nullable + private Integer meanValue; + + @Nullable + private Integer medianValue; + + @Nullable + private Integer maxValue; + + @Nullable + private Integer minValue; + + @Nullable + private String earliest; + + @Nullable + private String latest; + + /** + * Required - API name: {@code count} + */ + public final Builder count(int value) { + this.count = value; + return this; + } + + /** + * Required - API name: {@code cardinality} + */ + public final Builder cardinality(int value) { + this.cardinality = value; + return this; + } + + /** + * Required - API name: {@code top_hits} + *

    + * Adds all elements of list to topHits. + */ + public final Builder topHits(List list) { + this.topHits = _listAddAll(this.topHits, list); + return this; + } + + /** + * Required - API name: {@code top_hits} + *

    + * Adds one or more values to topHits. + */ + public final Builder topHits(TopHit value, TopHit... values) { + this.topHits = _listAdd(this.topHits, value, values); + return this; + } + + /** + * Required - API name: {@code top_hits} + *

    + * Adds a value to topHits using a builder lambda. + */ + public final Builder topHits(Function> fn) { + return topHits(fn.apply(new TopHit.Builder()).build()); + } + + /** + * API name: {@code mean_value} + */ + public final Builder meanValue(@Nullable Integer value) { + this.meanValue = value; + return this; + } + + /** + * API name: {@code median_value} + */ + public final Builder medianValue(@Nullable Integer value) { + this.medianValue = value; + return this; + } + + /** + * API name: {@code max_value} + */ + public final Builder maxValue(@Nullable Integer value) { + this.maxValue = value; + return this; + } + + /** + * API name: {@code min_value} + */ + public final Builder minValue(@Nullable Integer value) { + this.minValue = value; + return this; + } + + /** + * API name: {@code earliest} + */ + public final Builder earliest(@Nullable String value) { + this.earliest = value; + return this; + } + + /** + * API name: {@code latest} + */ + public final Builder latest(@Nullable String value) { + this.latest = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link FieldStat}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public FieldStat build() { + _checkSingleUse(); + + return new FieldStat(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link FieldStat} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + FieldStat::setupFieldStatDeserializer); + + protected static void setupFieldStatDeserializer(ObjectDeserializer op) { + + op.add(Builder::count, JsonpDeserializer.integerDeserializer(), "count"); + op.add(Builder::cardinality, JsonpDeserializer.integerDeserializer(), "cardinality"); + op.add(Builder::topHits, JsonpDeserializer.arrayDeserializer(TopHit._DESERIALIZER), "top_hits"); + op.add(Builder::meanValue, JsonpDeserializer.integerDeserializer(), "mean_value"); + op.add(Builder::medianValue, JsonpDeserializer.integerDeserializer(), "median_value"); + op.add(Builder::maxValue, JsonpDeserializer.integerDeserializer(), "max_value"); + op.add(Builder::minValue, JsonpDeserializer.integerDeserializer(), "min_value"); + op.add(Builder::earliest, JsonpDeserializer.stringDeserializer(), "earliest"); + op.add(Builder::latest, JsonpDeserializer.stringDeserializer(), "latest"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindFieldStructureRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindFieldStructureRequest.java new file mode 100644 index 000000000..ccd22538c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindFieldStructureRequest.java @@ -0,0 +1,764 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.Number; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. 
+// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: text_structure.find_field_structure.Request + +/** + * Find the structure of a text field. Find the structure of a text field in an + * Elasticsearch index. + * + * @see API + * specification + */ + +public class FindFieldStructureRequest extends RequestBase { + @Nullable + private final String columnNames; + + @Nullable + private final String delimiter; + + @Nullable + private final Number documentsToSample; + + @Nullable + private final EcsCompatibilityType ecsCompatibility; + + @Nullable + private final Boolean explain; + + private final String field; + + @Nullable + private final FormatType format; + + @Nullable + private final String grokPattern; + + private final String index; + + @Nullable + private final String quote; + + @Nullable + private final Boolean shouldTrimFields; + + @Nullable + private final Time timeout; + + @Nullable + private final String timestampField; + + @Nullable + private final String timestampFormat; + + // --------------------------------------------------------------------------------------------- + + private FindFieldStructureRequest(Builder builder) { + + this.columnNames = builder.columnNames; + this.delimiter = builder.delimiter; + this.documentsToSample = builder.documentsToSample; + this.ecsCompatibility = builder.ecsCompatibility; + this.explain = builder.explain; + this.field = ApiTypeHelper.requireNonNull(builder.field, this, "field"); + this.format = builder.format; + this.grokPattern = builder.grokPattern; + this.index = ApiTypeHelper.requireNonNull(builder.index, this, "index"); + this.quote = builder.quote; + this.shouldTrimFields = builder.shouldTrimFields; + this.timeout = builder.timeout; + this.timestampField = builder.timestampField; + this.timestampFormat = builder.timestampFormat; + + } + + public 
static FindFieldStructureRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If format is set to delimited, you can specify the + * column names in a comma-separated list. If this parameter is not specified, + * the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named + * "column1", "column2", "column3", for example. + *

    + * API name: {@code column_names} + */ + @Nullable + public final String columnNames() { + return this.columnNames; + } + + /** + * If you have set format to delimited, you can + * specify the character used to delimit the values in each row. Only a single + * character is supported; the delimiter cannot have multiple characters. By + * default, the API considers the following possibilities: comma, tab, + * semi-colon, and pipe (|). In this default scenario, all rows + * must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number + * of columns than the first row. + *

    + * API name: {@code delimiter} + */ + @Nullable + public final String delimiter() { + return this.delimiter; + } + + /** + * The number of documents to include in the structural analysis. The minimum + * value is 2. + *

    + * API name: {@code documents_to_sample} + */ + @Nullable + public final Number documentsToSample() { + return this.documentsToSample; + } + + /** + * The mode of compatibility with ECS compliant Grok patterns. Use this + * parameter to specify whether to use ECS Grok patterns instead of legacy ones + * when the structure finder creates a Grok pattern. This setting primarily has + * an impact when a whole message Grok pattern such as + * %{CATALINALOG} matches the input. If the structure finder + * identifies a common structure but has no idea of the meaning then generic + * field names such as path, ipaddress, + * field1, and field2 are used in the + * grok_pattern output. The intention in that situation is that a + * user who knows the meanings will rename the fields before using them. + *

    + * API name: {@code ecs_compatibility} + */ + @Nullable + public final EcsCompatibilityType ecsCompatibility() { + return this.ecsCompatibility; + } + + /** + * If true, the response includes a field named explanation, which + * is an array of strings that indicate how the structure finder produced its + * result. + *

    + * API name: {@code explain} + */ + @Nullable + public final Boolean explain() { + return this.explain; + } + + /** + * Required - The field that should be analyzed. + *

    + * API name: {@code field} + */ + public final String field() { + return this.field; + } + + /** + * The high level structure of the text. By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a + * delimited format to be detected. If the format is set to delimited and the + * delimiter is not set, however, the API tolerates up to 5% of rows that have a + * different number of columns than the first row. + *

    + * API name: {@code format} + */ + @Nullable + public final FormatType format() { + return this.format; + } + + /** + * If the format is semi_structured_text, you can specify a Grok + * pattern that is used to extract fields from every message in the text. The + * name of the timestamp field in the Grok pattern must match what is specified + * in the timestamp_field parameter. If that parameter is not + * specified, the name of the timestamp field in the Grok pattern must match + * "timestamp". If grok_pattern is not specified, the + * structure finder creates a Grok pattern. + *

    + * API name: {@code grok_pattern} + */ + @Nullable + public final String grokPattern() { + return this.grokPattern; + } + + /** + * Required - The name of the index that contains the analyzed field. + *

    + * API name: {@code index} + */ + public final String index() { + return this.index; + } + + /** + * If the format is delimited, you can specify the character used + * to quote the values in each row if they contain newlines or the delimiter + * character. Only a single character is supported. If this parameter is not + * specified, the default value is a double quote ("). If your + * delimited text format does not use quoting, a workaround is to set this + * argument to a character that does not appear anywhere in the sample. + *

    + * API name: {@code quote} + */ + @Nullable + public final String quote() { + return this.quote; + } + + /** + * If the format is delimited, you can specify whether values + * between delimiters should have whitespace trimmed from them. If this + * parameter is not specified and the delimiter is pipe (|), the + * default value is true. Otherwise, the default value is false. + *

    + * API name: {@code should_trim_fields} + */ + @Nullable + public final Boolean shouldTrimFields() { + return this.shouldTrimFields; + } + + /** + * The maximum amount of time that the structure analysis can take. If the + * analysis is still running when the timeout expires, it will be stopped. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + /** + * The name of the field that contains the primary timestamp of each record in + * the text. In particular, if the text was ingested into an index, this is the + * field that would be used to populate the @timestamp field. + *

    + * If the format is semi_structured_text, this field must match the + * name of the appropriate extraction in the grok_pattern. + * Therefore, for semi-structured text, it is best not to specify this parameter + * unless grok_pattern is also specified. + *

    + * For structured text, if you specify this parameter, the field must exist + * within the text. + *

    + * If this parameter is not specified, the structure finder makes a decision + * about which field (if any) is the primary timestamp field. For structured + * text, it is not compulsory to have a timestamp in the text. + *

    + * API name: {@code timestamp_field} + */ + @Nullable + public final String timestampField() { + return this.timestampField; + } + + /** + * The Java time format of the timestamp field in the text. Only a subset of + * Java time format letter groups are supported: + *

+	 * <ul>
+	 * <li><code>a</code></li>
+	 * <li><code>d</code></li>
+	 * <li><code>dd</code></li>
+	 * <li><code>EEE</code></li>
+	 * <li><code>EEEE</code></li>
+	 * <li><code>H</code></li>
+	 * <li><code>HH</code></li>
+	 * <li><code>h</code></li>
+	 * <li><code>M</code></li>
+	 * <li><code>MM</code></li>
+	 * <li><code>MMM</code></li>
+	 * <li><code>MMMM</code></li>
+	 * <li><code>mm</code></li>
+	 * <li><code>ss</code></li>
+	 * <li><code>XX</code></li>
+	 * <li><code>XXX</code></li>
+	 * <li><code>yy</code></li>
+	 * <li><code>yyyy</code></li>
+	 * <li><code>zzz</code></li>
+	 * </ul>

+	 * Additionally <code>S</code> letter groups (fractional seconds) of length one
+	 * to nine are supported providing they occur after <code>ss</code> and are
+	 * separated from the <code>ss</code> by a period (<code>.</code>), comma
+	 * (<code>,</code>), or colon (<code>:</code>). Spacing and punctuation are also
+	 * permitted with the exception of a question mark (<code>?</code>), newline, and
+	 * carriage return, together with literal text enclosed in single quotes. For
+	 * example, <code>MM/dd HH.mm.ss,SSSSSS 'in' yyyy</code> is a valid override
+	 * format.
+	 * <p>

    + * One valuable use case for this parameter is when the format is + * semi-structured text, there are multiple timestamp formats in the text, and + * you know which format corresponds to the primary timestamp, but you do not + * want to specify the full grok_pattern. Another is when the + * timestamp format is one that the structure finder does not consider by + * default. + *

    + * If this parameter is not specified, the structure finder chooses the best + * format from a built-in set. + *

    + * If the special value null is specified, the structure finder + * will not look for a primary timestamp in the text. When the format is + * semi-structured text, this will result in the structure finder treating the + * text as single-line messages. + *

    + * API name: {@code timestamp_format} + */ + @Nullable + public final String timestampFormat() { + return this.timestampFormat; + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FindFieldStructureRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private String columnNames; + + @Nullable + private String delimiter; + + @Nullable + private Number documentsToSample; + + @Nullable + private EcsCompatibilityType ecsCompatibility; + + @Nullable + private Boolean explain; + + private String field; + + @Nullable + private FormatType format; + + @Nullable + private String grokPattern; + + private String index; + + @Nullable + private String quote; + + @Nullable + private Boolean shouldTrimFields; + + @Nullable + private Time timeout; + + @Nullable + private String timestampField; + + @Nullable + private String timestampFormat; + + /** + * If format is set to delimited, you can specify the + * column names in a comma-separated list. If this parameter is not specified, + * the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named + * "column1", "column2", "column3", for example. + *

    + * API name: {@code column_names} + */ + public final Builder columnNames(@Nullable String value) { + this.columnNames = value; + return this; + } + + /** + * If you have set format to delimited, you can + * specify the character used to delimit the values in each row. Only a single + * character is supported; the delimiter cannot have multiple characters. By + * default, the API considers the following possibilities: comma, tab, + * semi-colon, and pipe (|). In this default scenario, all rows + * must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number + * of columns than the first row. + *

    + * API name: {@code delimiter} + */ + public final Builder delimiter(@Nullable String value) { + this.delimiter = value; + return this; + } + + /** + * The number of documents to include in the structural analysis. The minimum + * value is 2. + *

    + * API name: {@code documents_to_sample} + */ + public final Builder documentsToSample(@Nullable Number value) { + this.documentsToSample = value; + return this; + } + + /** + * The mode of compatibility with ECS compliant Grok patterns. Use this + * parameter to specify whether to use ECS Grok patterns instead of legacy ones + * when the structure finder creates a Grok pattern. This setting primarily has + * an impact when a whole message Grok pattern such as + * %{CATALINALOG} matches the input. If the structure finder + * identifies a common structure but has no idea of the meaning then generic + * field names such as path, ipaddress, + * field1, and field2 are used in the + * grok_pattern output. The intention in that situation is that a + * user who knows the meanings will rename the fields before using them. + *

    + * API name: {@code ecs_compatibility} + */ + public final Builder ecsCompatibility(@Nullable EcsCompatibilityType value) { + this.ecsCompatibility = value; + return this; + } + + /** + * If true, the response includes a field named explanation, which + * is an array of strings that indicate how the structure finder produced its + * result. + *

    + * API name: {@code explain} + */ + public final Builder explain(@Nullable Boolean value) { + this.explain = value; + return this; + } + + /** + * Required - The field that should be analyzed. + *

    + * API name: {@code field} + */ + public final Builder field(String value) { + this.field = value; + return this; + } + + /** + * The high level structure of the text. By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a + * delimited format to be detected. If the format is set to delimited and the + * delimiter is not set, however, the API tolerates up to 5% of rows that have a + * different number of columns than the first row. + *

    + * API name: {@code format} + */ + public final Builder format(@Nullable FormatType value) { + this.format = value; + return this; + } + + /** + * If the format is semi_structured_text, you can specify a Grok + * pattern that is used to extract fields from every message in the text. The + * name of the timestamp field in the Grok pattern must match what is specified + * in the timestamp_field parameter. If that parameter is not + * specified, the name of the timestamp field in the Grok pattern must match + * "timestamp". If grok_pattern is not specified, the + * structure finder creates a Grok pattern. + *

    + * API name: {@code grok_pattern} + */ + public final Builder grokPattern(@Nullable String value) { + this.grokPattern = value; + return this; + } + + /** + * Required - The name of the index that contains the analyzed field. + *

    + * API name: {@code index} + */ + public final Builder index(String value) { + this.index = value; + return this; + } + + /** + * If the format is delimited, you can specify the character used + * to quote the values in each row if they contain newlines or the delimiter + * character. Only a single character is supported. If this parameter is not + * specified, the default value is a double quote ("). If your + * delimited text format does not use quoting, a workaround is to set this + * argument to a character that does not appear anywhere in the sample. + *

    + * API name: {@code quote} + */ + public final Builder quote(@Nullable String value) { + this.quote = value; + return this; + } + + /** + * If the format is delimited, you can specify whether values + * between delimiters should have whitespace trimmed from them. If this + * parameter is not specified and the delimiter is pipe (|), the + * default value is true. Otherwise, the default value is false. + *

    + * API name: {@code should_trim_fields} + */ + public final Builder shouldTrimFields(@Nullable Boolean value) { + this.shouldTrimFields = value; + return this; + } + + /** + * The maximum amount of time that the structure analysis can take. If the + * analysis is still running when the timeout expires, it will be stopped. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * The maximum amount of time that the structure analysis can take. If the + * analysis is still running when the timeout expires, it will be stopped. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + /** + * The name of the field that contains the primary timestamp of each record in + * the text. In particular, if the text was ingested into an index, this is the + * field that would be used to populate the @timestamp field. + *

    + * If the format is semi_structured_text, this field must match the + * name of the appropriate extraction in the grok_pattern. + * Therefore, for semi-structured text, it is best not to specify this parameter + * unless grok_pattern is also specified. + *

    + * For structured text, if you specify this parameter, the field must exist + * within the text. + *

    + * If this parameter is not specified, the structure finder makes a decision + * about which field (if any) is the primary timestamp field. For structured + * text, it is not compulsory to have a timestamp in the text. + *

    + * API name: {@code timestamp_field} + */ + public final Builder timestampField(@Nullable String value) { + this.timestampField = value; + return this; + } + + /** + * The Java time format of the timestamp field in the text. Only a subset of + * Java time format letter groups are supported: + *

+		 * <ul>
+		 * <li><code>a</code></li>
+		 * <li><code>d</code></li>
+		 * <li><code>dd</code></li>
+		 * <li><code>EEE</code></li>
+		 * <li><code>EEEE</code></li>
+		 * <li><code>H</code></li>
+		 * <li><code>HH</code></li>
+		 * <li><code>h</code></li>
+		 * <li><code>M</code></li>
+		 * <li><code>MM</code></li>
+		 * <li><code>MMM</code></li>
+		 * <li><code>MMMM</code></li>
+		 * <li><code>mm</code></li>
+		 * <li><code>ss</code></li>
+		 * <li><code>XX</code></li>
+		 * <li><code>XXX</code></li>
+		 * <li><code>yy</code></li>
+		 * <li><code>yyyy</code></li>
+		 * <li><code>zzz</code></li>
+		 * </ul>

+		 * Additionally <code>S</code> letter groups (fractional seconds) of length one
+		 * to nine are supported providing they occur after <code>ss</code> and are
+		 * separated from the <code>ss</code> by a period (<code>.</code>), comma
+		 * (<code>,</code>), or colon (<code>:</code>). Spacing and punctuation are also
+		 * permitted with the exception of a question mark (<code>?</code>), newline, and
+		 * carriage return, together with literal text enclosed in single quotes. For
+		 * example, <code>MM/dd HH.mm.ss,SSSSSS 'in' yyyy</code> is a valid override
+		 * format.
+		 * <p>

    + * One valuable use case for this parameter is when the format is + * semi-structured text, there are multiple timestamp formats in the text, and + * you know which format corresponds to the primary timestamp, but you do not + * want to specify the full grok_pattern. Another is when the + * timestamp format is one that the structure finder does not consider by + * default. + *

    + * If this parameter is not specified, the structure finder chooses the best + * format from a built-in set. + *

    + * If the special value null is specified, the structure finder + * will not look for a primary timestamp in the text. When the format is + * semi-structured text, this will result in the structure finder treating the + * text as single-line messages. + *

    + * API name: {@code timestamp_format} + */ + public final Builder timestampFormat(@Nullable String value) { + this.timestampFormat = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link FindFieldStructureRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public FindFieldStructureRequest build() { + _checkSingleUse(); + + return new FindFieldStructureRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code text_structure.find_field_structure}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/text_structure.find_field_structure", + + // Request method + request -> { + return "GET"; + + }, + + // Request path + request -> { + return "/_text_structure/find_field_structure"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.explain != null) { + params.put("explain", String.valueOf(request.explain)); + } + if (request.format != null) { + params.put("format", request.format.jsonValue()); + } + params.put("index", request.index); + if (request.timestampField != null) { + params.put("timestamp_field", request.timestampField); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + if (request.timestampFormat != null) { + params.put("timestamp_format", request.timestampFormat); + } + if (request.quote != null) { + params.put("quote", request.quote); + } + if (request.shouldTrimFields != null) { + params.put("should_trim_fields", String.valueOf(request.shouldTrimFields)); + } + params.put("field", request.field); + if (request.grokPattern != null) { + params.put("grok_pattern", request.grokPattern); + } + if (request.delimiter != null) { + params.put("delimiter", 
request.delimiter); + } + if (request.columnNames != null) { + params.put("column_names", request.columnNames); + } + if (request.documentsToSample != null) { + params.put("documents_to_sample", request.documentsToSample.toString()); + } + if (request.ecsCompatibility != null) { + params.put("ecs_compatibility", request.ecsCompatibility.jsonValue()); + } + return params; + + }, SimpleEndpoint.emptyMap(), false, FindFieldStructureResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindFieldStructureResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindFieldStructureResponse.java new file mode 100644 index 000000000..8d59b84b7 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindFieldStructureResponse.java @@ -0,0 +1,604 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.elasticsearch._types.mapping.TypeMapping; +import co.elastic.clients.elasticsearch.ingest.PipelineConfig; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: text_structure.find_field_structure.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class FindFieldStructureResponse implements JsonpSerializable { + private final String charset; + + @Nullable + private final EcsCompatibilityType ecsCompatibility; + + private final Map fieldStats; + + private final FormatType format; + + @Nullable + private final String grokPattern; + + private final List javaTimestampFormats; + + private final List jodaTimestampFormats; + + private final PipelineConfig ingestPipeline; + + private final TypeMapping mappings; + + @Nullable + private final String multilineStartPattern; + + private final boolean needClientTimezone; + + private final int numLinesAnalyzed; + + private final int numMessagesAnalyzed; + + private final String sampleStart; + + @Nullable + private final String timestampField; + + // --------------------------------------------------------------------------------------------- + + private FindFieldStructureResponse(Builder builder) { + + this.charset = ApiTypeHelper.requireNonNull(builder.charset, this, "charset"); + this.ecsCompatibility = builder.ecsCompatibility; + this.fieldStats = ApiTypeHelper.unmodifiableRequired(builder.fieldStats, this, "fieldStats"); + this.format = ApiTypeHelper.requireNonNull(builder.format, this, "format"); + this.grokPattern = builder.grokPattern; + this.javaTimestampFormats = ApiTypeHelper.unmodifiable(builder.javaTimestampFormats); + this.jodaTimestampFormats = ApiTypeHelper.unmodifiable(builder.jodaTimestampFormats); + this.ingestPipeline = ApiTypeHelper.requireNonNull(builder.ingestPipeline, this, "ingestPipeline"); + this.mappings = ApiTypeHelper.requireNonNull(builder.mappings, this, "mappings"); + this.multilineStartPattern = builder.multilineStartPattern; + this.needClientTimezone = ApiTypeHelper.requireNonNull(builder.needClientTimezone, this, 
"needClientTimezone"); + this.numLinesAnalyzed = ApiTypeHelper.requireNonNull(builder.numLinesAnalyzed, this, "numLinesAnalyzed"); + this.numMessagesAnalyzed = ApiTypeHelper.requireNonNull(builder.numMessagesAnalyzed, this, + "numMessagesAnalyzed"); + this.sampleStart = ApiTypeHelper.requireNonNull(builder.sampleStart, this, "sampleStart"); + this.timestampField = builder.timestampField; + + } + + public static FindFieldStructureResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code charset} + */ + public final String charset() { + return this.charset; + } + + /** + * API name: {@code ecs_compatibility} + */ + @Nullable + public final EcsCompatibilityType ecsCompatibility() { + return this.ecsCompatibility; + } + + /** + * Required - API name: {@code field_stats} + */ + public final Map fieldStats() { + return this.fieldStats; + } + + /** + * Required - API name: {@code format} + */ + public final FormatType format() { + return this.format; + } + + /** + * API name: {@code grok_pattern} + */ + @Nullable + public final String grokPattern() { + return this.grokPattern; + } + + /** + * API name: {@code java_timestamp_formats} + */ + public final List javaTimestampFormats() { + return this.javaTimestampFormats; + } + + /** + * API name: {@code joda_timestamp_formats} + */ + public final List jodaTimestampFormats() { + return this.jodaTimestampFormats; + } + + /** + * Required - API name: {@code ingest_pipeline} + */ + public final PipelineConfig ingestPipeline() { + return this.ingestPipeline; + } + + /** + * Required - API name: {@code mappings} + */ + public final TypeMapping mappings() { + return this.mappings; + } + + /** + * API name: {@code multiline_start_pattern} + */ + @Nullable + public final String multilineStartPattern() { + return this.multilineStartPattern; + } + + /** + * Required - API name: {@code need_client_timezone} + */ + public final boolean needClientTimezone() { + return 
this.needClientTimezone; + } + + /** + * Required - API name: {@code num_lines_analyzed} + */ + public final int numLinesAnalyzed() { + return this.numLinesAnalyzed; + } + + /** + * Required - API name: {@code num_messages_analyzed} + */ + public final int numMessagesAnalyzed() { + return this.numMessagesAnalyzed; + } + + /** + * Required - API name: {@code sample_start} + */ + public final String sampleStart() { + return this.sampleStart; + } + + /** + * API name: {@code timestamp_field} + */ + @Nullable + public final String timestampField() { + return this.timestampField; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("charset"); + generator.write(this.charset); + + if (this.ecsCompatibility != null) { + generator.writeKey("ecs_compatibility"); + this.ecsCompatibility.serialize(generator, mapper); + } + if (ApiTypeHelper.isDefined(this.fieldStats)) { + generator.writeKey("field_stats"); + generator.writeStartObject(); + for (Map.Entry item0 : this.fieldStats.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + generator.writeKey("format"); + this.format.serialize(generator, mapper); + if (this.grokPattern != null) { + generator.writeKey("grok_pattern"); + generator.write(this.grokPattern); + + } + if (ApiTypeHelper.isDefined(this.javaTimestampFormats)) { + generator.writeKey("java_timestamp_formats"); + generator.writeStartArray(); + for (String item0 : this.javaTimestampFormats) { + generator.write(item0); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.jodaTimestampFormats)) { + generator.writeKey("joda_timestamp_formats"); + generator.writeStartArray(); + for (String item0 : 
this.jodaTimestampFormats) { + generator.write(item0); + + } + generator.writeEnd(); + + } + generator.writeKey("ingest_pipeline"); + this.ingestPipeline.serialize(generator, mapper); + + generator.writeKey("mappings"); + this.mappings.serialize(generator, mapper); + + if (this.multilineStartPattern != null) { + generator.writeKey("multiline_start_pattern"); + generator.write(this.multilineStartPattern); + + } + generator.writeKey("need_client_timezone"); + generator.write(this.needClientTimezone); + + generator.writeKey("num_lines_analyzed"); + generator.write(this.numLinesAnalyzed); + + generator.writeKey("num_messages_analyzed"); + generator.write(this.numMessagesAnalyzed); + + generator.writeKey("sample_start"); + generator.write(this.sampleStart); + + if (this.timestampField != null) { + generator.writeKey("timestamp_field"); + generator.write(this.timestampField); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FindFieldStructureResponse}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String charset; + + @Nullable + private EcsCompatibilityType ecsCompatibility; + + private Map fieldStats; + + private FormatType format; + + @Nullable + private String grokPattern; + + @Nullable + private List javaTimestampFormats; + + @Nullable + private List jodaTimestampFormats; + + private PipelineConfig ingestPipeline; + + private TypeMapping mappings; + + @Nullable + private String multilineStartPattern; + + private Boolean needClientTimezone; + + private Integer numLinesAnalyzed; + + private Integer numMessagesAnalyzed; + + private String sampleStart; + + @Nullable + private String timestampField; + + /** + * Required - API name: {@code charset} + */ + public final Builder charset(String value) { + this.charset = value; + return this; + } + + /** + * API name: {@code ecs_compatibility} + */ + public final Builder ecsCompatibility(@Nullable EcsCompatibilityType value) { + this.ecsCompatibility = value; + return this; + } + + /** + * Required - API name: {@code field_stats} + *

    + * Adds all entries of map to fieldStats. + */ + public final Builder fieldStats(Map map) { + this.fieldStats = _mapPutAll(this.fieldStats, map); + return this; + } + + /** + * Required - API name: {@code field_stats} + *

    + * Adds an entry to fieldStats. + */ + public final Builder fieldStats(String key, FieldStat value) { + this.fieldStats = _mapPut(this.fieldStats, key, value); + return this; + } + + /** + * Required - API name: {@code field_stats} + *

    + * Adds an entry to fieldStats using a builder lambda. + */ + public final Builder fieldStats(String key, Function> fn) { + return fieldStats(key, fn.apply(new FieldStat.Builder()).build()); + } + + /** + * Required - API name: {@code format} + */ + public final Builder format(FormatType value) { + this.format = value; + return this; + } + + /** + * API name: {@code grok_pattern} + */ + public final Builder grokPattern(@Nullable String value) { + this.grokPattern = value; + return this; + } + + /** + * API name: {@code java_timestamp_formats} + *

    + * Adds all elements of list to javaTimestampFormats. + */ + public final Builder javaTimestampFormats(List list) { + this.javaTimestampFormats = _listAddAll(this.javaTimestampFormats, list); + return this; + } + + /** + * API name: {@code java_timestamp_formats} + *

    + * Adds one or more values to javaTimestampFormats. + */ + public final Builder javaTimestampFormats(String value, String... values) { + this.javaTimestampFormats = _listAdd(this.javaTimestampFormats, value, values); + return this; + } + + /** + * API name: {@code joda_timestamp_formats} + *

    + * Adds all elements of list to jodaTimestampFormats. + */ + public final Builder jodaTimestampFormats(List list) { + this.jodaTimestampFormats = _listAddAll(this.jodaTimestampFormats, list); + return this; + } + + /** + * API name: {@code joda_timestamp_formats} + *

    + * Adds one or more values to jodaTimestampFormats. + */ + public final Builder jodaTimestampFormats(String value, String... values) { + this.jodaTimestampFormats = _listAdd(this.jodaTimestampFormats, value, values); + return this; + } + + /** + * Required - API name: {@code ingest_pipeline} + */ + public final Builder ingestPipeline(PipelineConfig value) { + this.ingestPipeline = value; + return this; + } + + /** + * Required - API name: {@code ingest_pipeline} + */ + public final Builder ingestPipeline(Function> fn) { + return this.ingestPipeline(fn.apply(new PipelineConfig.Builder()).build()); + } + + /** + * Required - API name: {@code mappings} + */ + public final Builder mappings(TypeMapping value) { + this.mappings = value; + return this; + } + + /** + * Required - API name: {@code mappings} + */ + public final Builder mappings(Function> fn) { + return this.mappings(fn.apply(new TypeMapping.Builder()).build()); + } + + /** + * API name: {@code multiline_start_pattern} + */ + public final Builder multilineStartPattern(@Nullable String value) { + this.multilineStartPattern = value; + return this; + } + + /** + * Required - API name: {@code need_client_timezone} + */ + public final Builder needClientTimezone(boolean value) { + this.needClientTimezone = value; + return this; + } + + /** + * Required - API name: {@code num_lines_analyzed} + */ + public final Builder numLinesAnalyzed(int value) { + this.numLinesAnalyzed = value; + return this; + } + + /** + * Required - API name: {@code num_messages_analyzed} + */ + public final Builder numMessagesAnalyzed(int value) { + this.numMessagesAnalyzed = value; + return this; + } + + /** + * Required - API name: {@code sample_start} + */ + public final Builder sampleStart(String value) { + this.sampleStart = value; + return this; + } + + /** + * API name: {@code timestamp_field} + */ + public final Builder timestampField(@Nullable String value) { + this.timestampField = value; + return this; + } + + @Override + 
protected Builder self() { + return this; + } + + /** + * Builds a {@link FindFieldStructureResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public FindFieldStructureResponse build() { + _checkSingleUse(); + + return new FindFieldStructureResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link FindFieldStructureResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, FindFieldStructureResponse::setupFindFieldStructureResponseDeserializer); + + protected static void setupFindFieldStructureResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::charset, JsonpDeserializer.stringDeserializer(), "charset"); + op.add(Builder::ecsCompatibility, EcsCompatibilityType._DESERIALIZER, "ecs_compatibility"); + op.add(Builder::fieldStats, JsonpDeserializer.stringMapDeserializer(FieldStat._DESERIALIZER), "field_stats"); + op.add(Builder::format, FormatType._DESERIALIZER, "format"); + op.add(Builder::grokPattern, JsonpDeserializer.stringDeserializer(), "grok_pattern"); + op.add(Builder::javaTimestampFormats, + JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "java_timestamp_formats"); + op.add(Builder::jodaTimestampFormats, + JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "joda_timestamp_formats"); + op.add(Builder::ingestPipeline, PipelineConfig._DESERIALIZER, "ingest_pipeline"); + op.add(Builder::mappings, TypeMapping._DESERIALIZER, "mappings"); + op.add(Builder::multilineStartPattern, JsonpDeserializer.stringDeserializer(), "multiline_start_pattern"); + op.add(Builder::needClientTimezone, JsonpDeserializer.booleanDeserializer(), "need_client_timezone"); + op.add(Builder::numLinesAnalyzed, JsonpDeserializer.integerDeserializer(), "num_lines_analyzed"); + op.add(Builder::numMessagesAnalyzed, 
JsonpDeserializer.integerDeserializer(), "num_messages_analyzed"); + op.add(Builder::sampleStart, JsonpDeserializer.stringDeserializer(), "sample_start"); + op.add(Builder::timestampField, JsonpDeserializer.stringDeserializer(), "timestamp_field"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindMessageStructureRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindMessageStructureRequest.java new file mode 100644 index 000000000..f9513af92 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindMessageStructureRequest.java @@ -0,0 +1,782 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.String; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: text_structure.find_message_structure.Request + +/** + * Find the structure of text messages. Find the structure of a list of text + * messages. The messages must contain data that is suitable to be ingested into + * Elasticsearch. + *

    + * This API provides a starting point for ingesting data into Elasticsearch in a + * format that is suitable for subsequent use with other Elastic Stack + * functionality. Use this API rather than the find text structure API if your + * input text has already been split up into separate messages by some other + * process. The response from the API contains: + *

      + *
    • Sample messages.
    • + *
    • Statistics that reveal the most common values for all fields detected + * within the text and basic numeric statistics for numeric fields.
    • + *
    • Information about the structure of the text, which is useful when you + * write ingest configurations to index it or similarly formatted text. + * Appropriate mappings for an Elasticsearch index, which you could use to + * ingest the text.
    • + *
    + *

    + * All this information can be calculated by the structure finder with no + * guidance. However, you can optionally override some of the decisions about + * the text structure by specifying one or more query parameters. + * + * @see API + * specification + */ +@JsonpDeserializable +public class FindMessageStructureRequest extends RequestBase implements JsonpSerializable { + @Nullable + private final String columnNames; + + @Nullable + private final String delimiter; + + @Nullable + private final EcsCompatibilityType ecsCompatibility; + + @Nullable + private final Boolean explain; + + @Nullable + private final FormatType format; + + @Nullable + private final String grokPattern; + + private final List messages; + + @Nullable + private final String quote; + + @Nullable + private final Boolean shouldTrimFields; + + @Nullable + private final Time timeout; + + @Nullable + private final String timestampField; + + @Nullable + private final String timestampFormat; + + // --------------------------------------------------------------------------------------------- + + private FindMessageStructureRequest(Builder builder) { + + this.columnNames = builder.columnNames; + this.delimiter = builder.delimiter; + this.ecsCompatibility = builder.ecsCompatibility; + this.explain = builder.explain; + this.format = builder.format; + this.grokPattern = builder.grokPattern; + this.messages = ApiTypeHelper.unmodifiableRequired(builder.messages, this, "messages"); + this.quote = builder.quote; + this.shouldTrimFields = builder.shouldTrimFields; + this.timeout = builder.timeout; + this.timestampField = builder.timestampField; + this.timestampFormat = builder.timestampFormat; + + } + + public static FindMessageStructureRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If the format is delimited, you can specify the column names in + * a comma-separated list. 
If this parameter is not specified, the structure + * finder uses the column names from the header row of the text. If the text + * does not have a header role, columns are named "column1", + * "column2", "column3", for example. + *

    + * API name: {@code column_names} + */ + @Nullable + public final String columnNames() { + return this.columnNames; + } + + /** + * If you the format is delimited, you can specify the character + * used to delimit the values in each row. Only a single character is supported; + * the delimiter cannot have multiple characters. By default, the API considers + * the following possibilities: comma, tab, semi-colon, and pipe + * (|). In this default scenario, all rows must have the same + * number of fields for the delimited format to be detected. If you specify a + * delimiter, up to 10% of the rows can have a different number of columns than + * the first row. + *

    + * API name: {@code delimiter} + */ + @Nullable + public final String delimiter() { + return this.delimiter; + } + + /** + * The mode of compatibility with ECS compliant Grok patterns. Use this + * parameter to specify whether to use ECS Grok patterns instead of legacy ones + * when the structure finder creates a Grok pattern. This setting primarily has + * an impact when a whole message Grok pattern such as + * %{CATALINALOG} matches the input. If the structure finder + * identifies a common structure but has no idea of meaning then generic field + * names such as path, ipaddress, field1, + * and field2 are used in the grok_pattern output, + * with the intention that a user who knows the meanings rename these fields + * before using it. + *

    + * API name: {@code ecs_compatibility} + */ + @Nullable + public final EcsCompatibilityType ecsCompatibility() { + return this.ecsCompatibility; + } + + /** + * If this parameter is set to true, the response includes a field named + * explanation, which is an array of strings that indicate how the + * structure finder produced its result. + *

    + * API name: {@code explain} + */ + @Nullable + public final Boolean explain() { + return this.explain; + } + + /** + * The high level structure of the text. By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a + * delimited format to be detected. If the format is delimited and + * the delimiter is not set, however, the API tolerates up to 5% of rows that + * have a different number of columns than the first row. + *

    + * API name: {@code format} + */ + @Nullable + public final FormatType format() { + return this.format; + } + + /** + * If the format is semi_structured_text, you can specify a Grok + * pattern that is used to extract fields from every message in the text. The + * name of the timestamp field in the Grok pattern must match what is specified + * in the timestamp_field parameter. If that parameter is not + * specified, the name of the timestamp field in the Grok pattern must match + * "timestamp". If grok_pattern is not specified, the + * structure finder creates a Grok pattern. + *

    + * API name: {@code grok_pattern} + */ + @Nullable + public final String grokPattern() { + return this.grokPattern; + } + + /** + * Required - The list of messages you want to analyze. + *

    + * API name: {@code messages} + */ + public final List messages() { + return this.messages; + } + + /** + * If the format is delimited, you can specify the character used + * to quote the values in each row if they contain newlines or the delimiter + * character. Only a single character is supported. If this parameter is not + * specified, the default value is a double quote ("). If your + * delimited text format does not use quoting, a workaround is to set this + * argument to a character that does not appear anywhere in the sample. + *

    + * API name: {@code quote} + */ + @Nullable + public final String quote() { + return this.quote; + } + + /** + * If the format is delimited, you can specify whether values + * between delimiters should have whitespace trimmed from them. If this + * parameter is not specified and the delimiter is pipe (|), the + * default value is true. Otherwise, the default value is false. + *

    + * API name: {@code should_trim_fields} + */ + @Nullable + public final Boolean shouldTrimFields() { + return this.shouldTrimFields; + } + + /** + * The maximum amount of time that the structure analysis can take. If the + * analysis is still running when the timeout expires, it will be stopped. + *

    + * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + /** + * The name of the field that contains the primary timestamp of each record in + * the text. In particular, if the text was ingested into an index, this is the + * field that would be used to populate the @timestamp field. + *

    + * If the format is semi_structured_text, this field must match the + * name of the appropriate extraction in the grok_pattern. + * Therefore, for semi-structured text, it is best not to specify this parameter + * unless grok_pattern is also specified. + *

    + * For structured text, if you specify this parameter, the field must exist + * within the text. + *

    + * If this parameter is not specified, the structure finder makes a decision + * about which field (if any) is the primary timestamp field. For structured + * text, it is not compulsory to have a timestamp in the text. + *

    + * API name: {@code timestamp_field} + */ + @Nullable + public final String timestampField() { + return this.timestampField; + } + + /** + * The Java time format of the timestamp field in the text. Only a subset of + * Java time format letter groups are supported: + *

      + *
    • a
    • + *
    • d
    • + *
    • dd
    • + *
    • EEE
    • + *
    • EEEE
    • + *
    • H
    • + *
    • HH
    • + *
    • h
    • + *
    • M
    • + *
    • MM
    • + *
    • MMM
    • + *
    • MMMM
    • + *
    • mm
    • + *
    • ss
    • + *
    • XX
    • + *
    • XXX
    • + *
    • yy
    • + *
    • yyyy
    • + *
    • zzz
    • + *
    + *

    + * Additionally S letter groups (fractional seconds) of length one + * to nine are supported providing they occur after ss and are + * separated from the ss by a period (.), comma + * (,), or colon (:). Spacing and punctuation is also + * permitted with the exception a question mark (?), newline, and + * carriage return, together with literal text enclosed in single quotes. For + * example, MM/dd HH.mm.ss,SSSSSS 'in' yyyy is a valid override + * format. + *

    + * One valuable use case for this parameter is when the format is + * semi-structured text, there are multiple timestamp formats in the text, and + * you know which format corresponds to the primary timestamp, but you do not + * want to specify the full grok_pattern. Another is when the + * timestamp format is one that the structure finder does not consider by + * default. + *

    + * If this parameter is not specified, the structure finder chooses the best + * format from a built-in set. + *

    + * If the special value null is specified, the structure finder + * will not look for a primary timestamp in the text. When the format is + * semi-structured text, this will result in the structure finder treating the + * text as single-line messages. + *

    + * API name: {@code timestamp_format} + */ + @Nullable + public final String timestampFormat() { + return this.timestampFormat; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (ApiTypeHelper.isDefined(this.messages)) { + generator.writeKey("messages"); + generator.writeStartArray(); + for (String item0 : this.messages) { + generator.write(item0); + + } + generator.writeEnd(); + + } + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FindMessageStructureRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private String columnNames; + + @Nullable + private String delimiter; + + @Nullable + private EcsCompatibilityType ecsCompatibility; + + @Nullable + private Boolean explain; + + @Nullable + private FormatType format; + + @Nullable + private String grokPattern; + + private List messages; + + @Nullable + private String quote; + + @Nullable + private Boolean shouldTrimFields; + + @Nullable + private Time timeout; + + @Nullable + private String timestampField; + + @Nullable + private String timestampFormat; + + /** + * If the format is delimited, you can specify the column names in + * a comma-separated list. If this parameter is not specified, the structure + * finder uses the column names from the header row of the text. If the text + * does not have a header role, columns are named "column1", + * "column2", "column3", for example. + *

    + * API name: {@code column_names} + */ + public final Builder columnNames(@Nullable String value) { + this.columnNames = value; + return this; + } + + /** + * If you the format is delimited, you can specify the character + * used to delimit the values in each row. Only a single character is supported; + * the delimiter cannot have multiple characters. By default, the API considers + * the following possibilities: comma, tab, semi-colon, and pipe + * (|). In this default scenario, all rows must have the same + * number of fields for the delimited format to be detected. If you specify a + * delimiter, up to 10% of the rows can have a different number of columns than + * the first row. + *

    + * API name: {@code delimiter} + */ + public final Builder delimiter(@Nullable String value) { + this.delimiter = value; + return this; + } + + /** + * The mode of compatibility with ECS compliant Grok patterns. Use this + * parameter to specify whether to use ECS Grok patterns instead of legacy ones + * when the structure finder creates a Grok pattern. This setting primarily has + * an impact when a whole message Grok pattern such as + * %{CATALINALOG} matches the input. If the structure finder + * identifies a common structure but has no idea of meaning then generic field + * names such as path, ipaddress, field1, + * and field2 are used in the grok_pattern output, + * with the intention that a user who knows the meanings rename these fields + * before using it. + *

    + * API name: {@code ecs_compatibility} + */ + public final Builder ecsCompatibility(@Nullable EcsCompatibilityType value) { + this.ecsCompatibility = value; + return this; + } + + /** + * If this parameter is set to true, the response includes a field named + * explanation, which is an array of strings that indicate how the + * structure finder produced its result. + *

    + * API name: {@code explain} + */ + public final Builder explain(@Nullable Boolean value) { + this.explain = value; + return this; + } + + /** + * The high level structure of the text. By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a + * delimited format to be detected. If the format is delimited and + * the delimiter is not set, however, the API tolerates up to 5% of rows that + * have a different number of columns than the first row. + *

    + * API name: {@code format} + */ + public final Builder format(@Nullable FormatType value) { + this.format = value; + return this; + } + + /** + * If the format is semi_structured_text, you can specify a Grok + * pattern that is used to extract fields from every message in the text. The + * name of the timestamp field in the Grok pattern must match what is specified + * in the timestamp_field parameter. If that parameter is not + * specified, the name of the timestamp field in the Grok pattern must match + * "timestamp". If grok_pattern is not specified, the + * structure finder creates a Grok pattern. + *

    + * API name: {@code grok_pattern} + */ + public final Builder grokPattern(@Nullable String value) { + this.grokPattern = value; + return this; + } + + /** + * Required - The list of messages you want to analyze. + *

    + * API name: {@code messages} + *

    + * Adds all elements of list to messages. + */ + public final Builder messages(List list) { + this.messages = _listAddAll(this.messages, list); + return this; + } + + /** + * Required - The list of messages you want to analyze. + *

    + * API name: {@code messages} + *

    + * Adds one or more values to messages. + */ + public final Builder messages(String value, String... values) { + this.messages = _listAdd(this.messages, value, values); + return this; + } + + /** + * If the format is delimited, you can specify the character used + * to quote the values in each row if they contain newlines or the delimiter + * character. Only a single character is supported. If this parameter is not + * specified, the default value is a double quote ("). If your + * delimited text format does not use quoting, a workaround is to set this + * argument to a character that does not appear anywhere in the sample. + *

    + * API name: {@code quote} + */ + public final Builder quote(@Nullable String value) { + this.quote = value; + return this; + } + + /** + * If the format is delimited, you can specify whether values + * between delimiters should have whitespace trimmed from them. If this + * parameter is not specified and the delimiter is pipe (|), the + * default value is true. Otherwise, the default value is false. + *

    + * API name: {@code should_trim_fields} + */ + public final Builder shouldTrimFields(@Nullable Boolean value) { + this.shouldTrimFields = value; + return this; + } + + /** + * The maximum amount of time that the structure analysis can take. If the + * analysis is still running when the timeout expires, it will be stopped. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * The maximum amount of time that the structure analysis can take. If the + * analysis is still running when the timeout expires, it will be stopped. + *

    + * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + /** + * The name of the field that contains the primary timestamp of each record in + * the text. In particular, if the text was ingested into an index, this is the + * field that would be used to populate the @timestamp field. + *

    + * If the format is semi_structured_text, this field must match the + * name of the appropriate extraction in the grok_pattern. + * Therefore, for semi-structured text, it is best not to specify this parameter + * unless grok_pattern is also specified. + *

    + * For structured text, if you specify this parameter, the field must exist + * within the text. + *

    + * If this parameter is not specified, the structure finder makes a decision + * about which field (if any) is the primary timestamp field. For structured + * text, it is not compulsory to have a timestamp in the text. + *

    + * API name: {@code timestamp_field} + */ + public final Builder timestampField(@Nullable String value) { + this.timestampField = value; + return this; + } + + /** + * The Java time format of the timestamp field in the text. Only a subset of + * Java time format letter groups are supported: + *

      + *
    • a
    • + *
    • d
    • + *
    • dd
    • + *
    • EEE
    • + *
    • EEEE
    • + *
    • H
    • + *
    • HH
    • + *
    • h
    • + *
    • M
    • + *
    • MM
    • + *
    • MMM
    • + *
    • MMMM
    • + *
    • mm
    • + *
    • ss
    • + *
    • XX
    • + *
    • XXX
    • + *
    • yy
    • + *
    • yyyy
    • + *
    • zzz
    • + *
    + *

    + * Additionally S letter groups (fractional seconds) of length one + * to nine are supported providing they occur after ss and are + * separated from the ss by a period (.), comma + * (,), or colon (:). Spacing and punctuation is also + * permitted with the exception a question mark (?), newline, and + * carriage return, together with literal text enclosed in single quotes. For + * example, MM/dd HH.mm.ss,SSSSSS 'in' yyyy is a valid override + * format. + *

    + * One valuable use case for this parameter is when the format is + * semi-structured text, there are multiple timestamp formats in the text, and + * you know which format corresponds to the primary timestamp, but you do not + * want to specify the full grok_pattern. Another is when the + * timestamp format is one that the structure finder does not consider by + * default. + *

    + * If this parameter is not specified, the structure finder chooses the best + * format from a built-in set. + *

    + * If the special value null is specified, the structure finder + * will not look for a primary timestamp in the text. When the format is + * semi-structured text, this will result in the structure finder treating the + * text as single-line messages. + *

    + * API name: {@code timestamp_format} + */ + public final Builder timestampFormat(@Nullable String value) { + this.timestampFormat = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link FindMessageStructureRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public FindMessageStructureRequest build() { + _checkSingleUse(); + + return new FindMessageStructureRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link FindMessageStructureRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, FindMessageStructureRequest::setupFindMessageStructureRequestDeserializer); + + protected static void setupFindMessageStructureRequestDeserializer( + ObjectDeserializer op) { + + op.add(Builder::messages, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), + "messages"); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code text_structure.find_message_structure}". 
+ */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/text_structure.find_message_structure", + + // Request method + request -> { + return "POST"; + + }, + + // Request path + request -> { + return "/_text_structure/find_message_structure"; + + }, + + // Path parameters + request -> { + return Collections.emptyMap(); + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.explain != null) { + params.put("explain", String.valueOf(request.explain)); + } + if (request.timestampFormat != null) { + params.put("timestamp_format", request.timestampFormat); + } + if (request.quote != null) { + params.put("quote", request.quote); + } + if (request.shouldTrimFields != null) { + params.put("should_trim_fields", String.valueOf(request.shouldTrimFields)); + } + if (request.grokPattern != null) { + params.put("grok_pattern", request.grokPattern); + } + if (request.delimiter != null) { + params.put("delimiter", request.delimiter); + } + if (request.format != null) { + params.put("format", request.format.jsonValue()); + } + if (request.columnNames != null) { + params.put("column_names", request.columnNames); + } + if (request.timestampField != null) { + params.put("timestamp_field", request.timestampField); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + if (request.ecsCompatibility != null) { + params.put("ecs_compatibility", request.ecsCompatibility.jsonValue()); + } + return params; + + }, SimpleEndpoint.emptyMap(), true, FindMessageStructureResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindMessageStructureResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindMessageStructureResponse.java new file mode 100644 index 000000000..d3aa839a0 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FindMessageStructureResponse.java 
@@ -0,0 +1,604 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.elasticsearch._types.mapping.TypeMapping; +import co.elastic.clients.elasticsearch.ingest.PipelineConfig; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: text_structure.find_message_structure.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class FindMessageStructureResponse implements JsonpSerializable { + private final String charset; + + @Nullable + private final EcsCompatibilityType ecsCompatibility; + + private final Map fieldStats; + + private final FormatType format; + + @Nullable + private final String grokPattern; + + private final List javaTimestampFormats; + + private final List jodaTimestampFormats; + + private final PipelineConfig ingestPipeline; + + private final TypeMapping mappings; + + @Nullable + private final String multilineStartPattern; + + private final boolean needClientTimezone; + + private final int numLinesAnalyzed; + + private final int numMessagesAnalyzed; + + private final String sampleStart; + + @Nullable + private final String timestampField; + + // --------------------------------------------------------------------------------------------- + + private FindMessageStructureResponse(Builder builder) { + + this.charset = ApiTypeHelper.requireNonNull(builder.charset, this, "charset"); + this.ecsCompatibility = builder.ecsCompatibility; + this.fieldStats = ApiTypeHelper.unmodifiableRequired(builder.fieldStats, this, "fieldStats"); + this.format = ApiTypeHelper.requireNonNull(builder.format, this, "format"); + this.grokPattern = builder.grokPattern; + this.javaTimestampFormats = ApiTypeHelper.unmodifiable(builder.javaTimestampFormats); + 
this.jodaTimestampFormats = ApiTypeHelper.unmodifiable(builder.jodaTimestampFormats); + this.ingestPipeline = ApiTypeHelper.requireNonNull(builder.ingestPipeline, this, "ingestPipeline"); + this.mappings = ApiTypeHelper.requireNonNull(builder.mappings, this, "mappings"); + this.multilineStartPattern = builder.multilineStartPattern; + this.needClientTimezone = ApiTypeHelper.requireNonNull(builder.needClientTimezone, this, "needClientTimezone"); + this.numLinesAnalyzed = ApiTypeHelper.requireNonNull(builder.numLinesAnalyzed, this, "numLinesAnalyzed"); + this.numMessagesAnalyzed = ApiTypeHelper.requireNonNull(builder.numMessagesAnalyzed, this, + "numMessagesAnalyzed"); + this.sampleStart = ApiTypeHelper.requireNonNull(builder.sampleStart, this, "sampleStart"); + this.timestampField = builder.timestampField; + + } + + public static FindMessageStructureResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code charset} + */ + public final String charset() { + return this.charset; + } + + /** + * API name: {@code ecs_compatibility} + */ + @Nullable + public final EcsCompatibilityType ecsCompatibility() { + return this.ecsCompatibility; + } + + /** + * Required - API name: {@code field_stats} + */ + public final Map fieldStats() { + return this.fieldStats; + } + + /** + * Required - API name: {@code format} + */ + public final FormatType format() { + return this.format; + } + + /** + * API name: {@code grok_pattern} + */ + @Nullable + public final String grokPattern() { + return this.grokPattern; + } + + /** + * API name: {@code java_timestamp_formats} + */ + public final List javaTimestampFormats() { + return this.javaTimestampFormats; + } + + /** + * API name: {@code joda_timestamp_formats} + */ + public final List jodaTimestampFormats() { + return this.jodaTimestampFormats; + } + + /** + * Required - API name: {@code ingest_pipeline} + */ + public final PipelineConfig ingestPipeline() { + return 
this.ingestPipeline; + } + + /** + * Required - API name: {@code mappings} + */ + public final TypeMapping mappings() { + return this.mappings; + } + + /** + * API name: {@code multiline_start_pattern} + */ + @Nullable + public final String multilineStartPattern() { + return this.multilineStartPattern; + } + + /** + * Required - API name: {@code need_client_timezone} + */ + public final boolean needClientTimezone() { + return this.needClientTimezone; + } + + /** + * Required - API name: {@code num_lines_analyzed} + */ + public final int numLinesAnalyzed() { + return this.numLinesAnalyzed; + } + + /** + * Required - API name: {@code num_messages_analyzed} + */ + public final int numMessagesAnalyzed() { + return this.numMessagesAnalyzed; + } + + /** + * Required - API name: {@code sample_start} + */ + public final String sampleStart() { + return this.sampleStart; + } + + /** + * API name: {@code timestamp_field} + */ + @Nullable + public final String timestampField() { + return this.timestampField; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("charset"); + generator.write(this.charset); + + if (this.ecsCompatibility != null) { + generator.writeKey("ecs_compatibility"); + this.ecsCompatibility.serialize(generator, mapper); + } + if (ApiTypeHelper.isDefined(this.fieldStats)) { + generator.writeKey("field_stats"); + generator.writeStartObject(); + for (Map.Entry item0 : this.fieldStats.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + generator.writeKey("format"); + this.format.serialize(generator, mapper); + if (this.grokPattern != null) { + generator.writeKey("grok_pattern"); + generator.write(this.grokPattern); + + } + if (ApiTypeHelper.isDefined(this.javaTimestampFormats)) { + generator.writeKey("java_timestamp_formats"); + generator.writeStartArray(); + for (String item0 : this.javaTimestampFormats) { + generator.write(item0); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.jodaTimestampFormats)) { + generator.writeKey("joda_timestamp_formats"); + generator.writeStartArray(); + for (String item0 : this.jodaTimestampFormats) { + generator.write(item0); + + } + generator.writeEnd(); + + } + generator.writeKey("ingest_pipeline"); + this.ingestPipeline.serialize(generator, mapper); + + generator.writeKey("mappings"); + this.mappings.serialize(generator, mapper); + + if (this.multilineStartPattern != null) { + generator.writeKey("multiline_start_pattern"); + generator.write(this.multilineStartPattern); + + } + generator.writeKey("need_client_timezone"); + generator.write(this.needClientTimezone); + + generator.writeKey("num_lines_analyzed"); + generator.write(this.numLinesAnalyzed); + + 
generator.writeKey("num_messages_analyzed"); + generator.write(this.numMessagesAnalyzed); + + generator.writeKey("sample_start"); + generator.write(this.sampleStart); + + if (this.timestampField != null) { + generator.writeKey("timestamp_field"); + generator.write(this.timestampField); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link FindMessageStructureResponse}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String charset; + + @Nullable + private EcsCompatibilityType ecsCompatibility; + + private Map fieldStats; + + private FormatType format; + + @Nullable + private String grokPattern; + + @Nullable + private List javaTimestampFormats; + + @Nullable + private List jodaTimestampFormats; + + private PipelineConfig ingestPipeline; + + private TypeMapping mappings; + + @Nullable + private String multilineStartPattern; + + private Boolean needClientTimezone; + + private Integer numLinesAnalyzed; + + private Integer numMessagesAnalyzed; + + private String sampleStart; + + @Nullable + private String timestampField; + + /** + * Required - API name: {@code charset} + */ + public final Builder charset(String value) { + this.charset = value; + return this; + } + + /** + * API name: {@code ecs_compatibility} + */ + public final Builder ecsCompatibility(@Nullable EcsCompatibilityType value) { + this.ecsCompatibility = value; + return this; + } + + /** + * Required - API name: {@code field_stats} + *

    + * Adds all entries of map to fieldStats. + */ + public final Builder fieldStats(Map map) { + this.fieldStats = _mapPutAll(this.fieldStats, map); + return this; + } + + /** + * Required - API name: {@code field_stats} + *

    + * Adds an entry to fieldStats. + */ + public final Builder fieldStats(String key, FieldStat value) { + this.fieldStats = _mapPut(this.fieldStats, key, value); + return this; + } + + /** + * Required - API name: {@code field_stats} + *

    + * Adds an entry to fieldStats using a builder lambda. + */ + public final Builder fieldStats(String key, Function> fn) { + return fieldStats(key, fn.apply(new FieldStat.Builder()).build()); + } + + /** + * Required - API name: {@code format} + */ + public final Builder format(FormatType value) { + this.format = value; + return this; + } + + /** + * API name: {@code grok_pattern} + */ + public final Builder grokPattern(@Nullable String value) { + this.grokPattern = value; + return this; + } + + /** + * API name: {@code java_timestamp_formats} + *

    + * Adds all elements of list to javaTimestampFormats. + */ + public final Builder javaTimestampFormats(List list) { + this.javaTimestampFormats = _listAddAll(this.javaTimestampFormats, list); + return this; + } + + /** + * API name: {@code java_timestamp_formats} + *

    + * Adds one or more values to javaTimestampFormats. + */ + public final Builder javaTimestampFormats(String value, String... values) { + this.javaTimestampFormats = _listAdd(this.javaTimestampFormats, value, values); + return this; + } + + /** + * API name: {@code joda_timestamp_formats} + *

    + * Adds all elements of list to jodaTimestampFormats. + */ + public final Builder jodaTimestampFormats(List list) { + this.jodaTimestampFormats = _listAddAll(this.jodaTimestampFormats, list); + return this; + } + + /** + * API name: {@code joda_timestamp_formats} + *

    + * Adds one or more values to jodaTimestampFormats. + */ + public final Builder jodaTimestampFormats(String value, String... values) { + this.jodaTimestampFormats = _listAdd(this.jodaTimestampFormats, value, values); + return this; + } + + /** + * Required - API name: {@code ingest_pipeline} + */ + public final Builder ingestPipeline(PipelineConfig value) { + this.ingestPipeline = value; + return this; + } + + /** + * Required - API name: {@code ingest_pipeline} + */ + public final Builder ingestPipeline(Function> fn) { + return this.ingestPipeline(fn.apply(new PipelineConfig.Builder()).build()); + } + + /** + * Required - API name: {@code mappings} + */ + public final Builder mappings(TypeMapping value) { + this.mappings = value; + return this; + } + + /** + * Required - API name: {@code mappings} + */ + public final Builder mappings(Function> fn) { + return this.mappings(fn.apply(new TypeMapping.Builder()).build()); + } + + /** + * API name: {@code multiline_start_pattern} + */ + public final Builder multilineStartPattern(@Nullable String value) { + this.multilineStartPattern = value; + return this; + } + + /** + * Required - API name: {@code need_client_timezone} + */ + public final Builder needClientTimezone(boolean value) { + this.needClientTimezone = value; + return this; + } + + /** + * Required - API name: {@code num_lines_analyzed} + */ + public final Builder numLinesAnalyzed(int value) { + this.numLinesAnalyzed = value; + return this; + } + + /** + * Required - API name: {@code num_messages_analyzed} + */ + public final Builder numMessagesAnalyzed(int value) { + this.numMessagesAnalyzed = value; + return this; + } + + /** + * Required - API name: {@code sample_start} + */ + public final Builder sampleStart(String value) { + this.sampleStart = value; + return this; + } + + /** + * API name: {@code timestamp_field} + */ + public final Builder timestampField(@Nullable String value) { + this.timestampField = value; + return this; + } + + @Override + 
protected Builder self() { + return this; + } + + /** + * Builds a {@link FindMessageStructureResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public FindMessageStructureResponse build() { + _checkSingleUse(); + + return new FindMessageStructureResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link FindMessageStructureResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, FindMessageStructureResponse::setupFindMessageStructureResponseDeserializer); + + protected static void setupFindMessageStructureResponseDeserializer( + ObjectDeserializer op) { + + op.add(Builder::charset, JsonpDeserializer.stringDeserializer(), "charset"); + op.add(Builder::ecsCompatibility, EcsCompatibilityType._DESERIALIZER, "ecs_compatibility"); + op.add(Builder::fieldStats, JsonpDeserializer.stringMapDeserializer(FieldStat._DESERIALIZER), "field_stats"); + op.add(Builder::format, FormatType._DESERIALIZER, "format"); + op.add(Builder::grokPattern, JsonpDeserializer.stringDeserializer(), "grok_pattern"); + op.add(Builder::javaTimestampFormats, + JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "java_timestamp_formats"); + op.add(Builder::jodaTimestampFormats, + JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "joda_timestamp_formats"); + op.add(Builder::ingestPipeline, PipelineConfig._DESERIALIZER, "ingest_pipeline"); + op.add(Builder::mappings, TypeMapping._DESERIALIZER, "mappings"); + op.add(Builder::multilineStartPattern, JsonpDeserializer.stringDeserializer(), "multiline_start_pattern"); + op.add(Builder::needClientTimezone, JsonpDeserializer.booleanDeserializer(), "need_client_timezone"); + op.add(Builder::numLinesAnalyzed, JsonpDeserializer.integerDeserializer(), "num_lines_analyzed"); + 
op.add(Builder::numMessagesAnalyzed, JsonpDeserializer.integerDeserializer(), "num_messages_analyzed"); + op.add(Builder::sampleStart, JsonpDeserializer.stringDeserializer(), "sample_start"); + op.add(Builder::timestampField, JsonpDeserializer.stringDeserializer(), "timestamp_field"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FormatType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FormatType.java new file mode 100644 index 000000000..faebc1f0f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/FormatType.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public enum FormatType implements JsonEnum { + Delimited("delimited"), + + Ndjson("ndjson"), + + SemiStructuredText("semi_structured_text"), + + Xml("xml"), + + ; + + private final String jsonValue; + + FormatType(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( + FormatType.values()); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TestGrokPatternRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TestGrokPatternRequest.java index 991d6a88c..c6238fb9d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TestGrokPatternRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TestGrokPatternRequest.java @@ -59,7 +59,9 @@ // typedef: text_structure.test_grok_pattern.Request /** - * Tests a Grok pattern on some text. + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The + * API indicates whether the lines match the pattern together with the offsets + * and lengths of the matched substrings. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TopHit.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TopHit.java new file mode 100644 index 000000000..7dfd84ea1 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/text_structure/TopHit.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.text_structure; + +import co.elastic.clients.json.JsonData; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Long; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: text_structure._types.TopHit + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class TopHit implements JsonpSerializable { + private final long count; + + private final JsonData value; + + // --------------------------------------------------------------------------------------------- + + private TopHit(Builder builder) { + + this.count = ApiTypeHelper.requireNonNull(builder.count, this, "count"); + this.value = ApiTypeHelper.requireNonNull(builder.value, this, "value"); + + } + + public static TopHit of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code count} + */ + public final long count() { + return this.count; + } + + /** + * Required - API name: {@code value} + */ + public final JsonData value() { + return this.value; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("count"); + generator.write(this.count); + + generator.writeKey("value"); + this.value.serialize(generator, mapper); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link TopHit}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private Long count; + + private JsonData value; + + /** + * Required - API name: {@code count} + */ + public final Builder count(long value) { + this.count = value; + return this; + } + + /** + * Required - API name: {@code value} + */ + public final Builder value(JsonData value) { + this.value = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link TopHit}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public TopHit build() { + _checkSingleUse(); + + return new TopHit(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link TopHit} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + TopHit::setupTopHitDeserializer); + + protected static void setupTopHitDeserializer(ObjectDeserializer op) { + + op.add(Builder::count, JsonpDeserializer.longDeserializer(), "count"); + op.add(Builder::value, JsonData._DESERIALIZER, "value"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformAsyncClient.java index f64537fc8..9d58ca3c4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformAsyncClient.java @@ -615,13 +615,24 @@ public final CompletableFuture updateTransform( // ----- Endpoint: transform.upgrade_transforms /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. 
It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @see Documentation @@ -636,13 +647,24 @@ public CompletableFuture upgradeTransforms(UpgradeTra } /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @param fn * a function that initializes a builder to create the @@ -658,13 +680,24 @@ public final CompletableFuture upgradeTransforms( } /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformClient.java index f870e782b..009bd385e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/ElasticsearchTransformClient.java @@ -630,13 +630,24 @@ public final UpdateTransformResponse updateTransform( // ----- Endpoint: transform.upgrade_transforms /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @see Documentation @@ -652,13 +663,24 @@ public UpgradeTransformsResponse upgradeTransforms(UpgradeTransformsRequest requ } /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @param fn * a function that initializes a builder to create the @@ -675,13 +697,24 @@ public final UpgradeTransformsResponse upgradeTransforms( } /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/UpgradeTransformsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/UpgradeTransformsRequest.java index 7b351249c..727227830 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/UpgradeTransformsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/transform/UpgradeTransformsRequest.java @@ -56,13 +56,24 @@ // typedef: transform.upgrade_transforms.Request /** - * Upgrades all transforms. This API identifies transforms that have a legacy - * configuration format and upgrades them to the latest version. It also cleans - * up the internal data structures that store the transform state and - * checkpoints. The upgrade does not affect the source and destination indices. - * The upgrade also does not affect the roles that transforms use when + * Upgrade all transforms. Transforms are compatible across minor versions and + * between supported major versions. However, over time, the format of transform + * configuration information may change. This API identifies transforms that + * have a legacy configuration format and upgrades them to the latest version. + * It also cleans up the internal data structures that store the transform state + * and checkpoints. The upgrade does not affect the source and destination + * indices. The upgrade also does not affect the roles that transforms use when * Elasticsearch security features are enabled; the role used to read source * data and write to the destination index remains unchanged. + *

    + * If a transform upgrade step fails, the upgrade stops and an error is returned + * about the underlying issue. Resolve the issue then re-run the process again. + * A summary is returned when the upgrade is finished. + *

    + * To ensure continuous transforms remain running during a major version upgrade + * of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade + * transforms before upgrading the cluster. You may want to perform a recent + * cluster backup prior to the upgrade. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/AckWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/AckWatchRequest.java index ac998913b..3f6611356 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/AckWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/AckWatchRequest.java @@ -58,8 +58,15 @@ // typedef: watcher.ack_watch.Request /** - * Acknowledges a watch, manually throttling the execution of the watch's - * actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle + * the execution of the watch's actions. + *

    + * The acknowledgement state of an action is stored in the + * status.actions.<id>.ack.state structure. + *

    + * IMPORTANT: If the specified watch is currently being executed, this API will + * return an error. The reason for this behavior is to prevent overwriting the + * watch status from a watch execution. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ActivateWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ActivateWatchRequest.java index 17f91830b..20c2c5edf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ActivateWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ActivateWatchRequest.java @@ -56,7 +56,7 @@ // typedef: watcher.activate_watch.Request /** - * Activates a currently inactive watch. + * Activate a watch. A watch can be either active or inactive. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeactivateWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeactivateWatchRequest.java index 6181ca080..0ea05e563 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeactivateWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeactivateWatchRequest.java @@ -56,7 +56,7 @@ // typedef: watcher.deactivate_watch.Request /** - * Deactivates a currently active watch. + * Deactivate a watch. A watch can be either active or inactive. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeleteWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeleteWatchRequest.java index a4a79459e..312fa1cc2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeleteWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/DeleteWatchRequest.java @@ -56,7 +56,18 @@ // typedef: watcher.delete_watch.Request /** - * Removes a watch from Watcher. + * Delete a watch. When the watch is removed, the document representing the + * watch in the .watches index is gone and it will never be run + * again. + *

    + * Deleting a watch does not delete any watch execution records related to this + * watch from the watch history. + *

    + * IMPORTANT: Deleting a watch must be done by using only this API. Do not + * delete the watch directly from the .watches index using the + * Elasticsearch delete document API. When Elasticsearch security features are + * enabled, make sure no write privileges are granted to anyone for the + * .watches index. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherAsyncClient.java index 6c37a09b0..620404fd6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherAsyncClient.java @@ -70,8 +70,15 @@ public ElasticsearchWatcherAsyncClient withTransportOptions(@Nullable TransportO // ----- Endpoint: watcher.ack_watch /** - * Acknowledges a watch, manually throttling the execution of the watch's - * actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle + * the execution of the watch's actions. + * <p>

    + * The acknowledgement state of an action is stored in the + * status.actions.<id>.ack.state structure. + *

    + * IMPORTANT: If the specified watch is currently being executed, this API will + * return an error. The reason for this behavior is to prevent overwriting the + * watch status from a watch execution. * * @see Documentation @@ -86,8 +93,15 @@ public CompletableFuture ackWatch(AckWatchRequest request) { } /** - * Acknowledges a watch, manually throttling the execution of the watch's - * actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle + * the execution of the watch's actions. + * <p>

    + * The acknowledgement state of an action is stored in the + * status.actions.<id>.ack.state structure. + *

    + * IMPORTANT: If the specified watch is currently being executed, this API will + * return an error. The reason for this behavior is to prevent overwriting the + * watch status from a watch execution. * * @param fn * a function that initializes a builder to create the @@ -105,7 +119,7 @@ public final CompletableFuture ackWatch( // ----- Endpoint: watcher.activate_watch /** - * Activates a currently inactive watch. + * Activate a watch. A watch can be either active or inactive. * * @see Documentation @@ -120,7 +134,7 @@ public CompletableFuture activateWatch(ActivateWatchReque } /** - * Activates a currently inactive watch. + * Activate a watch. A watch can be either active or inactive. * * @param fn * a function that initializes a builder to create the @@ -138,7 +152,7 @@ public final CompletableFuture activateWatch( // ----- Endpoint: watcher.deactivate_watch /** - * Deactivates a currently active watch. + * Deactivate a watch. A watch can be either active or inactive. * * @see Documentation @@ -153,7 +167,7 @@ public CompletableFuture deactivateWatch(DeactivateWatc } /** - * Deactivates a currently active watch. + * Deactivate a watch. A watch can be either active or inactive. * * @param fn * a function that initializes a builder to create the @@ -171,7 +185,7 @@ public final CompletableFuture deactivateWatch( // ----- Endpoint: watcher.delete_watch /** - * Removes a watch from Watcher. + * Delete a watch. When the watch is removed, the document representing the + * watch in the .watches index is gone and it will never be run + * again. + * <p>

    + * Deleting a watch does not delete any watch execution records related to this + * watch from the watch history. + *

    + * IMPORTANT: Deleting a watch must be done by using only this API. Do not + * delete the watch directly from the .watches index using the + * Elasticsearch delete document API. When Elasticsearch security features are + * enabled, make sure no write privileges are granted to anyone for the + * .watches index. * * @see Documentation @@ -186,7 +211,18 @@ public CompletableFuture deleteWatch(DeleteWatchRequest req } /** - * Removes a watch from Watcher. + * Delete a watch. When the watch is removed, the document representing the + * watch in the .watches index is gone and it will never be run + * again. + * <p>

    + * Deleting a watch does not delete any watch execution records related to this + * watch from the watch history. + *

    + * IMPORTANT: Deleting a watch must be done by using only this API. Do not + * delete the watch directly from the .watches index using the + * Elasticsearch delete document API. When Elasticsearch security features are + * enabled, make sure no write privileges are granted to anyone for the + * .watches index. * * @param fn * a function that initializes a builder to create the @@ -204,13 +240,19 @@ public final CompletableFuture deleteWatch( // ----- Endpoint: watcher.execute_watch /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + * <p>

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @see Documentation @@ -225,13 +267,19 @@ public CompletableFuture executeWatch(ExecuteWatchRequest } /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + *

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @param fn * a function that initializes a builder to create the @@ -247,13 +295,19 @@ public final CompletableFuture executeWatch( } /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + *

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @see Documentation @@ -268,7 +322,7 @@ public CompletableFuture executeWatch() { // ----- Endpoint: watcher.get_watch /** - * Retrieves a watch by its ID. + * Get a watch. * * @see Documentation @@ -283,7 +337,7 @@ public CompletableFuture getWatch(GetWatchRequest request) { } /** - * Retrieves a watch by its ID. + * Get a watch. * * @param fn * a function that initializes a builder to create the @@ -301,7 +355,23 @@ public final CompletableFuture getWatch( // ----- Endpoint: watcher.put_watch /** - * Creates a new watch, or updates an existing one. + * Create or update a watch. When a watch is registered, a new document that + * represents the watch is added to the .watches index and its + * trigger is immediately registered with the relevant trigger engine. Typically + * for the schedule trigger, the scheduler is the trigger engine. + *

    + * IMPORTANT: You must use Kibana or this API to create a watch. Do not add a + * watch directly to the .watches index by using the Elasticsearch + * index API. If Elasticsearch security features are enabled, do not give users + * write privileges on the .watches index. + *

    + * When you add a watch you can also define its initial active state by setting + * the active parameter. + *

    + * When Elasticsearch security features are enabled, your watch can index or + * search only on indices for which the user that stored the watch has + * privileges. If the user is able to read index a, but not index + * b, the same will apply when the watch runs. * * @see Documentation @@ -316,7 +386,23 @@ public CompletableFuture putWatch(PutWatchRequest request) { } /** - * Creates a new watch, or updates an existing one. + * Create or update a watch. When a watch is registered, a new document that + * represents the watch is added to the .watches index and its + * trigger is immediately registered with the relevant trigger engine. Typically + * for the schedule trigger, the scheduler is the trigger engine. + *

    + * IMPORTANT: You must use Kibana or this API to create a watch. Do not add a + * watch directly to the .watches index by using the Elasticsearch + * index API. If Elasticsearch security features are enabled, do not give users + * write privileges on the .watches index. + *

    + * When you add a watch you can also define its initial active state by setting + * the active parameter. + *

    + * When Elasticsearch security features are enabled, your watch can index or + * search only on indices for which the user that stored the watch has + * privileges. If the user is able to read index a, but not index + * b, the same will apply when the watch runs. * * @param fn * a function that initializes a builder to create the @@ -334,7 +420,8 @@ public final CompletableFuture putWatch( // ----- Endpoint: watcher.query_watches /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and + * optionally filter watches by a query. * * @see Documentation @@ -349,7 +436,8 @@ public CompletableFuture queryWatches(QueryWatchesRequest } /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and + * optionally filter watches by a query. * * @param fn * a function that initializes a builder to create the @@ -365,7 +453,8 @@ public final CompletableFuture queryWatches( } /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and + * optionally filter watches by a query. * * @see Documentation @@ -380,7 +469,8 @@ public CompletableFuture queryWatches() { // ----- Endpoint: watcher.start /** - * Starts Watcher if it is not already running. + * Start the watch service. Start the Watcher service if it is not already + * running. * * @see Documentation @@ -394,7 +484,7 @@ public CompletableFuture start() { // ----- Endpoint: watcher.stats /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. * * @see Documentation @@ -409,7 +499,7 @@ public CompletableFuture stats(WatcherStatsRequest request } /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. * * @param fn * a function that initializes a builder to create the @@ -425,7 +515,7 @@ public final CompletableFuture stats( } /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. 
* * @see Documentation @@ -440,7 +530,7 @@ public CompletableFuture stats() { // ----- Endpoint: watcher.stop /** - * Stops Watcher if it is running. + * Stop the watch service. Stop the Watcher service if it is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherClient.java index d5956b5ea..90bb6c101 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ElasticsearchWatcherClient.java @@ -68,8 +68,15 @@ public ElasticsearchWatcherClient withTransportOptions(@Nullable TransportOption // ----- Endpoint: watcher.ack_watch /** - * Acknowledges a watch, manually throttling the execution of the watch's - * actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle + * the execution of the watch's actions. + *

    + * The acknowledgement state of an action is stored in the + * status.actions.<id>.ack.state structure. + *

    + * IMPORTANT: If the specified watch is currently being executed, this API will + * return an error The reason for this behavior is to prevent overwriting the + * watch status from a watch execution. * * @see Documentation @@ -84,8 +91,15 @@ public AckWatchResponse ackWatch(AckWatchRequest request) throws IOException, El } /** - * Acknowledges a watch, manually throttling the execution of the watch's - * actions. + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle + * the execution of the watch's actions. + *

    + * The acknowledgement state of an action is stored in the + * status.actions.<id>.ack.state structure. + *

    + * IMPORTANT: If the specified watch is currently being executed, this API will + * return an error The reason for this behavior is to prevent overwriting the + * watch status from a watch execution. * * @param fn * a function that initializes a builder to create the @@ -103,7 +117,7 @@ public final AckWatchResponse ackWatch(FunctionDocumentation @@ -119,7 +133,7 @@ public ActivateWatchResponse activateWatch(ActivateWatchRequest request) } /** - * Activates a currently inactive watch. + * Activate a watch. A watch can be either active or inactive. * * @param fn * a function that initializes a builder to create the @@ -138,7 +152,7 @@ public final ActivateWatchResponse activateWatch( // ----- Endpoint: watcher.deactivate_watch /** - * Deactivates a currently active watch. + * Deactivate a watch. A watch can be either active or inactive. * * @see Documentation @@ -154,7 +168,7 @@ public DeactivateWatchResponse deactivateWatch(DeactivateWatchRequest request) } /** - * Deactivates a currently active watch. + * Deactivate a watch. A watch can be either active or inactive. * * @param fn * a function that initializes a builder to create the @@ -173,7 +187,18 @@ public final DeactivateWatchResponse deactivateWatch( // ----- Endpoint: watcher.delete_watch /** - * Removes a watch from Watcher. + * Delete a watch. When the watch is removed, the document representing the + * watch in the .watches index is gone and it will never be run + * again. + *

    + * Deleting a watch does not delete any watch execution records related to this + * watch from the watch history. + *

    + * IMPORTANT: Deleting a watch must be done by using only this API. Do not + * delete the watch directly from the .watches index using the + * Elasticsearch delete document API When Elasticsearch security features are + * enabled, make sure no write privileges are granted to anyone for the + * .watches index. * * @see Documentation @@ -188,7 +213,18 @@ public DeleteWatchResponse deleteWatch(DeleteWatchRequest request) throws IOExce } /** - * Removes a watch from Watcher. + * Delete a watch. When the watch is removed, the document representing the + * watch in the .watches index is gone and it will never be run + * again. + *

    + * Deleting a watch does not delete any watch execution records related to this + * watch from the watch history. + *

    + * IMPORTANT: Deleting a watch must be done by using only this API. Do not + * delete the watch directly from the .watches index using the + * Elasticsearch delete document API When Elasticsearch security features are + * enabled, make sure no write privileges are granted to anyone for the + * .watches index. * * @param fn * a function that initializes a builder to create the @@ -207,13 +243,19 @@ public final DeleteWatchResponse deleteWatch( // ----- Endpoint: watcher.execute_watch /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + *

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @see Documentation @@ -228,13 +270,19 @@ public ExecuteWatchResponse executeWatch(ExecuteWatchRequest request) throws IOE } /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + *

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @param fn * a function that initializes a builder to create the @@ -251,13 +299,19 @@ public final ExecuteWatchResponse executeWatch( } /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + *

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @see Documentation @@ -272,7 +326,7 @@ public ExecuteWatchResponse executeWatch() throws IOException, ElasticsearchExce // ----- Endpoint: watcher.get_watch /** - * Retrieves a watch by its ID. + * Get a watch. * * @see Documentation @@ -287,7 +341,7 @@ public GetWatchResponse getWatch(GetWatchRequest request) throws IOException, El } /** - * Retrieves a watch by its ID. + * Get a watch. * * @param fn * a function that initializes a builder to create the @@ -305,7 +359,23 @@ public final GetWatchResponse getWatch(Function.watches index and its + * trigger is immediately registered with the relevant trigger engine. Typically + * for the schedule trigger, the scheduler is the trigger engine. + *

    + * IMPORTANT: You must use Kibana or this API to create a watch. Do not add a + * watch directly to the .watches index by using the Elasticsearch + * index API. If Elasticsearch security features are enabled, do not give users + * write privileges on the .watches index. + *

    + * When you add a watch you can also define its initial active state by setting + * the active parameter. + *

    + * When Elasticsearch security features are enabled, your watch can index or + * search only on indices for which the user that stored the watch has + * privileges. If the user is able to read index a, but not index + * b, the same will apply when the watch runs. * * @see Documentation @@ -320,7 +390,23 @@ public PutWatchResponse putWatch(PutWatchRequest request) throws IOException, El } /** - * Creates a new watch, or updates an existing one. + * Create or update a watch. When a watch is registered, a new document that + * represents the watch is added to the .watches index and its + * trigger is immediately registered with the relevant trigger engine. Typically + * for the schedule trigger, the scheduler is the trigger engine. + *

    + * IMPORTANT: You must use Kibana or this API to create a watch. Do not add a + * watch directly to the .watches index by using the Elasticsearch + * index API. If Elasticsearch security features are enabled, do not give users + * write privileges on the .watches index. + *

    + * When you add a watch you can also define its initial active state by setting + * the active parameter. + *

    + * When Elasticsearch security features are enabled, your watch can index or + * search only on indices for which the user that stored the watch has + * privileges. If the user is able to read index a, but not index + * b, the same will apply when the watch runs. * * @param fn * a function that initializes a builder to create the @@ -338,7 +424,8 @@ public final PutWatchResponse putWatch(FunctionDocumentation @@ -353,7 +440,8 @@ public QueryWatchesResponse queryWatches(QueryWatchesRequest request) throws IOE } /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and + * optionally filter watches by a query. * * @param fn * a function that initializes a builder to create the @@ -370,7 +458,8 @@ public final QueryWatchesResponse queryWatches( } /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and + * optionally filter watches by a query. * * @see Documentation @@ -385,7 +474,8 @@ public QueryWatchesResponse queryWatches() throws IOException, ElasticsearchExce // ----- Endpoint: watcher.start /** - * Starts Watcher if it is not already running. + * Start the watch service. Start the Watcher service if it is not already + * running. * * @see Documentation @@ -399,7 +489,7 @@ public StartWatcherResponse start() throws IOException, ElasticsearchException { // ----- Endpoint: watcher.stats /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. * * @see Documentation @@ -414,7 +504,7 @@ public WatcherStatsResponse stats(WatcherStatsRequest request) throws IOExceptio } /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. * * @param fn * a function that initializes a builder to create the @@ -431,7 +521,7 @@ public final WatcherStatsResponse stats( } /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. 
* * @see Documentation @@ -446,7 +536,7 @@ public WatcherStatsResponse stats() throws IOException, ElasticsearchException { // ----- Endpoint: watcher.stop /** - * Stops Watcher if it is running. + * Stop the watch service. Stop the Watcher service if it is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ExecuteWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ExecuteWatchRequest.java index abaa69831..d9a914e09 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ExecuteWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ExecuteWatchRequest.java @@ -59,13 +59,19 @@ // typedef: watcher.execute_watch.Request /** - * This API can be used to force execution of the watch outside of its - * triggering logic or to simulate the watch execution for debugging purposes. + * Run a watch. This API can be used to force execution of the watch outside of + * its triggering logic or to simulate the watch execution for debugging + * purposes. + *

    * For testing and debugging purposes, you also have fine-grained control on how - * the watch runs. You can execute the watch without executing all of its - * actions or alternatively by simulating them. You can also force execution by - * ignoring the watch condition and control whether a watch record would be - * written to the watch history after execution. + * the watch runs. You can run the watch without running all of its actions or + * alternatively by simulating them. You can also force execution by ignoring + * the watch condition and control whether a watch record would be written to + * the watch history after it runs. + *

    + * You can use the run watch API to run watches that are not yet registered by + * specifying the watch definition inline. This serves as great tool for testing + * and debugging your watches prior to adding them to Watcher. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/GetWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/GetWatchRequest.java index e5d78074f..4c2d9e7c7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/GetWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/GetWatchRequest.java @@ -56,7 +56,7 @@ // typedef: watcher.get_watch.Request /** - * Retrieves a watch by its ID. + * Get a watch. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/PutWatchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/PutWatchRequest.java index ad1add11e..9ff18f4de 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/PutWatchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/PutWatchRequest.java @@ -61,7 +61,23 @@ // typedef: watcher.put_watch.Request /** - * Creates a new watch, or updates an existing one. + * Create or update a watch. When a watch is registered, a new document that + * represents the watch is added to the .watches index and its + * trigger is immediately registered with the relevant trigger engine. Typically + * for the schedule trigger, the scheduler is the trigger engine. + *

    + * IMPORTANT: You must use Kibana or this API to create a watch. Do not add a + * watch directly to the .watches index by using the Elasticsearch + * index API. If Elasticsearch security features are enabled, do not give users + * write privileges on the .watches index. + *

    + * When you add a watch you can also define its initial active state by setting + * the active parameter. + *

    + * When Elasticsearch security features are enabled, your watch can index or + * search only on indices for which the user that stored the watch has + * privileges. If the user is able to read index a, but not index + * b, the same will apply when the watch runs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/QueryWatchesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/QueryWatchesRequest.java index f9f2a8da9..75f7c2a46 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/QueryWatchesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/QueryWatchesRequest.java @@ -61,7 +61,8 @@ // typedef: watcher.query_watches.Request /** - * Retrieves stored watches. + * Query watches. Get all registered watches in a paginated manner and + * optionally filter watches by a query. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StartWatcherRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StartWatcherRequest.java index cedaadac6..a220fde45 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StartWatcherRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StartWatcherRequest.java @@ -50,7 +50,8 @@ // typedef: watcher.start.Request /** - * Starts Watcher if it is not already running. + * Start the watch service. Start the Watcher service if it is not already + * running. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StopWatcherRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StopWatcherRequest.java index 2458b25c7..a0e87e75b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StopWatcherRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/StopWatcherRequest.java @@ -50,7 +50,7 @@ // typedef: watcher.stop.Request /** - * Stops Watcher if it is running. + * Stop the watch service. Stop the Watcher service if it is running. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/WatcherStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/WatcherStatsRequest.java index 0c7fccd37..c6c40a9d0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/WatcherStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/WatcherStatsRequest.java @@ -58,7 +58,7 @@ // typedef: watcher.stats.Request /** - * Retrieves the current Watcher metrics. + * Get Watcher statistics. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackAsyncClient.java index c29569deb..567859792 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackAsyncClient.java @@ -68,8 +68,14 @@ public ElasticsearchXpackAsyncClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: xpack.info /** - * Provides general information about the installed X-Pack features. - * + * Get information. The information provided by the API includes: + *

      + *
    • Build information including the build number and timestamp.
    • + *
    • License information about the currently installed license.
    • + *
    • Feature information for the features that are currently enabled and + * available under the current license.
    • + *
    + * * @see Documentation * on elastic.co @@ -83,8 +89,14 @@ public CompletableFuture info(XpackInfoRequest request) { } /** - * Provides general information about the installed X-Pack features. - * + * Get information. The information provided by the API includes: + *
      + *
    • Build information including the build number and timestamp.
    • + *
    • License information about the currently installed license.
    • + *
    • Feature information for the features that are currently enabled and + * available under the current license.
    • + *
    + * * @param fn * a function that initializes a builder to create the * {@link XpackInfoRequest} @@ -99,8 +111,14 @@ public final CompletableFuture info( } /** - * Provides general information about the installed X-Pack features. - * + * Get information. The information provided by the API includes: + *
      + *
    • Build information including the build number and timestamp.
    • + *
    • License information about the currently installed license.
    • + *
    • Feature information for the features that are currently enabled and + * available under the current license.
    • + *
    + * * @see Documentation * on elastic.co @@ -114,8 +132,9 @@ public CompletableFuture info() { // ----- Endpoint: xpack.usage /** - * This API provides information about which features are currently enabled and - * available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently + * enabled and available under the current license. The API also provides some + * usage statistics. * * @see Documentation @@ -130,8 +149,9 @@ public CompletableFuture usage(XpackUsageRequest request) { } /** - * This API provides information about which features are currently enabled and - * available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently + * enabled and available under the current license. The API also provides some + * usage statistics. * * @param fn * a function that initializes a builder to create the @@ -147,8 +167,9 @@ public final CompletableFuture usage( } /** - * This API provides information about which features are currently enabled and - * available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently + * enabled and available under the current license. The API also provides some + * usage statistics. 
* * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackClient.java index 890bd6d1d..5502d5c43 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/ElasticsearchXpackClient.java @@ -68,8 +68,14 @@ public ElasticsearchXpackClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: xpack.info /** - * Provides general information about the installed X-Pack features. - * + * Get information. The information provided by the API includes: + *
      + *
    • Build information including the build number and timestamp.
    • + *
    • License information about the currently installed license.
    • + *
    • Feature information for the features that are currently enabled and + * available under the current license.
    • + *
    + * * @see
    Documentation * on elastic.co @@ -83,8 +89,14 @@ public XpackInfoResponse info(XpackInfoRequest request) throws IOException, Elas } /** - * Provides general information about the installed X-Pack features. - * + * Get information. The information provided by the API includes: + *
      + * <ul>
      + * <li>Build information including the build number and timestamp.</li>
      + * <li>License information about the currently installed license.</li>
      + * <li>Feature information for the features that are currently enabled and
      + * available under the current license.</li>
      + * </ul>
    + * * @param fn * a function that initializes a builder to create the * {@link XpackInfoRequest} @@ -99,8 +111,14 @@ public final XpackInfoResponse info(Function + *
  + * <li>Build information including the build number and timestamp.</li>
  + * <li>License information about the currently installed license.</li>
  + * <li>Feature information for the features that are currently enabled and
  + * available under the current license.</li>
  • + * + * * @see Documentation * on elastic.co @@ -114,8 +132,9 @@ public XpackInfoResponse info() throws IOException, ElasticsearchException { // ----- Endpoint: xpack.usage /** - * This API provides information about which features are currently enabled and - * available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently + * enabled and available under the current license. The API also provides some + * usage statistics. * * @see Documentation @@ -130,8 +149,9 @@ public XpackUsageResponse usage(XpackUsageRequest request) throws IOException, E } /** - * This API provides information about which features are currently enabled and - * available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently + * enabled and available under the current license. The API also provides some + * usage statistics. * * @param fn * a function that initializes a builder to create the @@ -147,8 +167,9 @@ public final XpackUsageResponse usage(FunctionDocumentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackInfoRequest.java index 09787d0d0..b94596875 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackInfoRequest.java @@ -59,8 +59,14 @@ // typedef: xpack.info.Request /** - * Provides general information about the installed X-Pack features. - * + * Get information. The information provided by the API includes: + *
      + * <ul>
      + * <li>Build information including the build number and timestamp.</li>
      + * <li>License information about the currently installed license.</li>
      + * <li>Feature information for the features that are currently enabled and
      + * available under the current license.</li>
      + * </ul>
    + * * @see
    API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackUsageRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackUsageRequest.java index 4d44ca92f..ac5675e3f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackUsageRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/xpack/XpackUsageRequest.java @@ -55,8 +55,9 @@ // typedef: xpack.usage.Request /** - * This API provides information about which features are currently enabled and - * available under the current license and some usage statistics. + * Get usage information. Get information about the features that are currently + * enabled and available under the current license. The API also provides some + * usage statistics. * * @see API * specification