diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptions.java index 08b539636..c655332e0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptions.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptions.java @@ -32,6 +32,7 @@ import jakarta.json.stream.JsonGenerator; import java.lang.Float; import java.lang.Integer; +import java.lang.String; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -61,25 +62,25 @@ */ @JsonpDeserializable public class DenseVectorIndexOptions implements JsonpSerializable { + private final String type; + @Nullable - private final Float confidenceInterval; + private final Integer m; @Nullable private final Integer efConstruction; @Nullable - private final Integer m; - - private final DenseVectorIndexOptionsType type; + private final Float confidenceInterval; // --------------------------------------------------------------------------------------------- private DenseVectorIndexOptions(Builder builder) { - this.confidenceInterval = builder.confidenceInterval; - this.efConstruction = builder.efConstruction; - this.m = builder.m; this.type = ApiTypeHelper.requireNonNull(builder.type, this, "type"); + this.m = builder.m; + this.efConstruction = builder.efConstruction; + this.confidenceInterval = builder.confidenceInterval; } @@ -88,64 +89,34 @@ public static DenseVectorIndexOptions of(Function0.90 and 1.0 or exactly - * 0. When the value is 0, this indicates that dynamic - * quantiles should be calculated for optimized quantization. When between - * 0.90 and 1.0, this value restricts the values used - * when calculating the quantization thresholds. - *

- * For example, a value of 0.95 will only use the middle
- * 95% of the values when calculating the quantization thresholds
- * (e.g. the highest and lowest 2.5% of values will be ignored).
- *
- * Defaults to 1/(dims + 1) for int8 quantized vectors
- * and 0 for int4 for dynamic quantile calculation.
- *
- * Only applicable to int8_hnsw, int4_hnsw,
- * int8_flat, and int4_flat index types.
- *
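// Editor's note (not part of the generated diff): a minimal usage sketch of the
// builder after this change, with the kNN algorithm type given as a plain String
// instead of the removed DenseVectorIndexOptionsType enum. All values are illustrative.
DenseVectorIndexOptions options = DenseVectorIndexOptions.of(o -> o
        .type("int8_hnsw")         // previously DenseVectorIndexOptionsType.Int8Hnsw
        .m(16)                     // neighbors per node in the HNSW graph
        .efConstruction(100)       // candidates tracked while building the graph
        .confidenceInterval(0.95f) // middle 95% of values used for quantization thresholds
);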

- * API name: {@code confidence_interval} + * Required - API name: {@code type} */ - @Nullable - public final Float confidenceInterval() { - return this.confidenceInterval; + public final String type() { + return this.type; } /** - * The number of candidates to track while assembling the list of nearest - * neighbors for each new node. - *

- * Only applicable to hnsw, int8_hnsw, and - * int4_hnsw index types. - *

- * API name: {@code ef_construction} + * API name: {@code m} */ @Nullable - public final Integer efConstruction() { - return this.efConstruction; + public final Integer m() { + return this.m; } /** - * The number of neighbors each node will be connected to in the HNSW graph. - *

- * Only applicable to hnsw, int8_hnsw, and - * int4_hnsw index types. - *

- * API name: {@code m} + * API name: {@code ef_construction} */ @Nullable - public final Integer m() { - return this.m; + public final Integer efConstruction() { + return this.efConstruction; } /** - * Required - The type of kNN algorithm to use. - *

- * API name: {@code type} + * API name: {@code confidence_interval} */ - public final DenseVectorIndexOptionsType type() { - return this.type; + @Nullable + public final Float confidenceInterval() { + return this.confidenceInterval; } /** @@ -159,9 +130,12 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - if (this.confidenceInterval != null) { - generator.writeKey("confidence_interval"); - generator.write(this.confidenceInterval); + generator.writeKey("type"); + generator.write(this.type); + + if (this.m != null) { + generator.writeKey("m"); + generator.write(this.m); } if (this.efConstruction != null) { @@ -169,13 +143,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.efConstruction); } - if (this.m != null) { - generator.writeKey("m"); - generator.write(this.m); + if (this.confidenceInterval != null) { + generator.writeKey("confidence_interval"); + generator.write(this.confidenceInterval); } - generator.writeKey("type"); - this.type.serialize(generator, mapper); } @@ -193,76 +165,46 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private String type; + @Nullable - private Float confidenceInterval; + private Integer m; @Nullable private Integer efConstruction; @Nullable - private Integer m; - - private DenseVectorIndexOptionsType type; + private Float confidenceInterval; /** - * The confidence interval to use when quantizing the vectors. Can be any value - * between and including 0.90 and 1.0 or exactly - * 0. When the value is 0, this indicates that dynamic - * quantiles should be calculated for optimized quantization. When between - * 0.90 and 1.0, this value restricts the values used - * when calculating the quantization thresholds. - *

- * For example, a value of 0.95 will only use the middle
- * 95% of the values when calculating the quantization thresholds
- * (e.g. the highest and lowest 2.5% of values will be ignored).
- *
- * Defaults to 1/(dims + 1) for int8 quantized vectors
- * and 0 for int4 for dynamic quantile calculation.
- *
- * Only applicable to int8_hnsw, int4_hnsw,
- * int8_flat, and int4_flat index types.
- *

- * API name: {@code confidence_interval} + * Required - API name: {@code type} */ - public final Builder confidenceInterval(@Nullable Float value) { - this.confidenceInterval = value; + public final Builder type(String value) { + this.type = value; return this; } /** - * The number of candidates to track while assembling the list of nearest - * neighbors for each new node. - *

- * Only applicable to hnsw, int8_hnsw, and - * int4_hnsw index types. - *

- * API name: {@code ef_construction} + * API name: {@code m} */ - public final Builder efConstruction(@Nullable Integer value) { - this.efConstruction = value; + public final Builder m(@Nullable Integer value) { + this.m = value; return this; } /** - * The number of neighbors each node will be connected to in the HNSW graph. - *

- * Only applicable to hnsw, int8_hnsw, and - * int4_hnsw index types. - *

- * API name: {@code m} + * API name: {@code ef_construction} */ - public final Builder m(@Nullable Integer value) { - this.m = value; + public final Builder efConstruction(@Nullable Integer value) { + this.efConstruction = value; return this; } /** - * Required - The type of kNN algorithm to use. - *

- * API name: {@code type} + * API name: {@code confidence_interval} */ - public final Builder type(DenseVectorIndexOptionsType value) { - this.type = value; + public final Builder confidenceInterval(@Nullable Float value) { + this.confidenceInterval = value; return this; } @@ -295,10 +237,10 @@ public DenseVectorIndexOptions build() { protected static void setupDenseVectorIndexOptionsDeserializer( ObjectDeserializer op) { - op.add(Builder::confidenceInterval, JsonpDeserializer.floatDeserializer(), "confidence_interval"); - op.add(Builder::efConstruction, JsonpDeserializer.integerDeserializer(), "ef_construction"); + op.add(Builder::type, JsonpDeserializer.stringDeserializer(), "type"); op.add(Builder::m, JsonpDeserializer.integerDeserializer(), "m"); - op.add(Builder::type, DenseVectorIndexOptionsType._DESERIALIZER, "type"); + op.add(Builder::efConstruction, JsonpDeserializer.integerDeserializer(), "ef_construction"); + op.add(Builder::confidenceInterval, JsonpDeserializer.floatDeserializer(), "confidence_interval"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptionsType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptionsType.java deleted file mode 100644 index 5d0cd38f3..000000000 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorIndexOptionsType.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package co.elastic.clients.elasticsearch._types.mapping; - -import co.elastic.clients.json.JsonEnum; -import co.elastic.clients.json.JsonpDeserializable; -import co.elastic.clients.json.JsonpDeserializer; - -//---------------------------------------------------------------- -// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. -//---------------------------------------------------------------- -// -// This code is generated from the Elasticsearch API specification -// at https://github.com/elastic/elasticsearch-specification -// -// Manual updates to this file will be lost when the code is -// re-generated. -// -// If you find a property that is missing or wrongly typed, please -// open an issue or a PR on the API specification repository. -// -//---------------------------------------------------------------- - -/** - * - * @see API - * specification - */ -@JsonpDeserializable -public enum DenseVectorIndexOptionsType implements JsonEnum { - /** - * This utilizes a brute-force search algorithm for exact kNN search. This - * supports all element_type values. - */ - Flat("flat"), - - /** - * This utilizes the HNSW algorithm for scalable approximate kNN search. This - * supports all element_type values. 
- */ - Hnsw("hnsw"), - - /** - * This utilizes a brute-force search algorithm in addition to automatically - * half-byte scalar quantization. Only supports element_type of - * float. - */ - Int4Flat("int4_flat"), - - /** - * This utilizes the HNSW algorithm in addition to automatically scalar - * quantization for scalable approximate kNN search with - * element_type of float. - *

- * This can reduce the memory footprint by 8x at the cost of some accuracy. - */ - Int4Hnsw("int4_hnsw"), - - /** - * This utilizes a brute-force search algorithm in addition to automatically - * scalar quantization. Only supports element_type of - * float. - */ - Int8Flat("int8_flat"), - - /** - * The default index type for float vectors. This utilizes the HNSW - * algorithm in addition to automatically scalar quantization for scalable - * approximate kNN search with element_type of float. - *

- * This can reduce the memory footprint by 4x at the cost of some accuracy. - */ - Int8Hnsw("int8_hnsw"), - - ; - - private final String jsonValue; - - DenseVectorIndexOptionsType(String jsonValue) { - this.jsonValue = jsonValue; - } - - public String jsonValue() { - return this.jsonValue; - } - - public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( - DenseVectorIndexOptionsType.values()); -} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorProperty.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorProperty.java index 0fa3c2d3b..200d4fc98 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorProperty.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorProperty.java @@ -28,6 +28,7 @@ import jakarta.json.stream.JsonGenerator; import java.lang.Boolean; import java.lang.Integer; +import java.lang.String; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -57,11 +58,14 @@ */ @JsonpDeserializable public class DenseVectorProperty extends PropertyBase implements PropertyVariant { + @Nullable + private final String elementType; + @Nullable private final Integer dims; @Nullable - private final DenseVectorElementType elementType; + private final String similarity; @Nullable private final Boolean index; @@ -69,19 +73,16 @@ public class DenseVectorProperty extends PropertyBase implements PropertyVariant @Nullable private final DenseVectorIndexOptions indexOptions; - @Nullable - private final DenseVectorSimilarity similarity; - // --------------------------------------------------------------------------------------------- private DenseVectorProperty(Builder builder) { super(builder); - this.dims = builder.dims; this.elementType = builder.elementType; + this.dims = builder.dims; + this.similarity = builder.similarity; this.index = builder.index; this.indexOptions = builder.indexOptions; - this.similarity = builder.similarity; } @@ -98,10 +99,14 @@ public Property.Kind _propertyKind() { } /** - * Number of vector dimensions. Can't exceed 4096. If - * dims is not specified, it will be set to the length of the first - * vector added to the field. - *

+ * API name: {@code element_type} + */ + @Nullable + public final String elementType() { + return this.elementType; + } + + /** * API name: {@code dims} */ @Nullable @@ -110,19 +115,14 @@ public final Integer dims() { } /** - * The data type used to encode vectors. The supported data types are - * float (default), byte, and bit. - *

- * API name: {@code element_type} + * API name: {@code similarity} */ @Nullable - public final DenseVectorElementType elementType() { - return this.elementType; + public final String similarity() { + return this.similarity; } /** - * If true, you can search this field using the kNN search API. - *

* API name: {@code index} */ @Nullable @@ -131,14 +131,6 @@ public final Boolean index() { } /** - * An optional section that configures the kNN indexing algorithm. The HNSW - * algorithm has two internal parameters that influence how the data structure - * is built. These can be adjusted to improve the accuracy of results, at the - * expense of slower indexing speed. - *

- * This parameter can only be specified when index is - * true. - *

* API name: {@code index_options} */ @Nullable @@ -146,42 +138,24 @@ public final DenseVectorIndexOptions indexOptions() { return this.indexOptions; } - /** - * The vector similarity metric to use in kNN search. - *

- * Documents are ranked by their vector field's similarity to the query vector.
- * The _score of each document will be derived from the similarity,
- * in a way that ensures scores are positive and that a larger score corresponds
- * to a higher ranking.
- *
- * Defaults to l2_norm when element_type is
- * bit otherwise defaults to cosine.
- *
- * bit vectors only support l2_norm as their
- * similarity metric.
- *
- * This parameter can only be specified when index is
- * true.
- *
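// Editor's note (not part of the generated diff): with element_type and similarity now
// modeled as plain Strings (the DenseVectorElementType and DenseVectorSimilarity enums
// are no longer referenced here), a dense_vector mapping can be sketched roughly as
// follows. The dimension count, literal values, and index options are illustrative only.
Property denseVector = Property.of(p -> p
        .denseVector(d -> d
                .dims(384)            // illustrative dimension count
                .elementType("float") // was DenseVectorElementType.Float
                .similarity("cosine") // was DenseVectorSimilarity.Cosine
                .index(true)
                .indexOptions(io -> io
                        .type("int8_hnsw")
                        .m(16)
                        .efConstruction(100))));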

- * API name: {@code similarity} - */ - @Nullable - public final DenseVectorSimilarity similarity() { - return this.similarity; - } - protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write("type", "dense_vector"); super.serializeInternal(generator, mapper); + if (this.elementType != null) { + generator.writeKey("element_type"); + generator.write(this.elementType); + + } if (this.dims != null) { generator.writeKey("dims"); generator.write(this.dims); } - if (this.elementType != null) { - generator.writeKey("element_type"); - this.elementType.serialize(generator, mapper); + if (this.similarity != null) { + generator.writeKey("similarity"); + generator.write(this.similarity); + } if (this.index != null) { generator.writeKey("index"); @@ -193,10 +167,6 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.indexOptions.serialize(generator, mapper); } - if (this.similarity != null) { - generator.writeKey("similarity"); - this.similarity.serialize(generator, mapper); - } } @@ -209,11 +179,14 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { public static class Builder extends PropertyBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private String elementType; + @Nullable private Integer dims; @Nullable - private DenseVectorElementType elementType; + private String similarity; @Nullable private Boolean index; @@ -221,14 +194,15 @@ public static class Builder extends PropertyBase.AbstractBuilder @Nullable private DenseVectorIndexOptions indexOptions; - @Nullable - private DenseVectorSimilarity similarity; + /** + * API name: {@code element_type} + */ + public final Builder elementType(@Nullable String value) { + this.elementType = value; + return this; + } /** - * Number of vector dimensions. Can't exceed 4096. If - * dims is not specified, it will be set to the length of the first - * vector added to the field. - *

* API name: {@code dims} */ public final Builder dims(@Nullable Integer value) { @@ -237,19 +211,14 @@ public final Builder dims(@Nullable Integer value) { } /** - * The data type used to encode vectors. The supported data types are - * float (default), byte, and bit. - *

- * API name: {@code element_type} + * API name: {@code similarity} */ - public final Builder elementType(@Nullable DenseVectorElementType value) { - this.elementType = value; + public final Builder similarity(@Nullable String value) { + this.similarity = value; return this; } /** - * If true, you can search this field using the kNN search API. - *

* API name: {@code index} */ public final Builder index(@Nullable Boolean value) { @@ -258,14 +227,6 @@ public final Builder index(@Nullable Boolean value) { } /** - * An optional section that configures the kNN indexing algorithm. The HNSW - * algorithm has two internal parameters that influence how the data structure - * is built. These can be adjusted to improve the accuracy of results, at the - * expense of slower indexing speed. - *

- * This parameter can only be specified when index is - * true. - *

* API name: {@code index_options} */ public final Builder indexOptions(@Nullable DenseVectorIndexOptions value) { @@ -274,14 +235,6 @@ public final Builder indexOptions(@Nullable DenseVectorIndexOptions value) { } /** - * An optional section that configures the kNN indexing algorithm. The HNSW - * algorithm has two internal parameters that influence how the data structure - * is built. These can be adjusted to improve the accuracy of results, at the - * expense of slower indexing speed. - *

- * This parameter can only be specified when index is - * true. - *

* API name: {@code index_options} */ public final Builder indexOptions( @@ -289,30 +242,6 @@ public final Builder indexOptions( return this.indexOptions(fn.apply(new DenseVectorIndexOptions.Builder()).build()); } - /** - * The vector similarity metric to use in kNN search. - *

- * Documents are ranked by their vector field's similarity to the query vector.
- * The _score of each document will be derived from the similarity,
- * in a way that ensures scores are positive and that a larger score corresponds
- * to a higher ranking.
- *
- * Defaults to l2_norm when element_type is
- * bit otherwise defaults to cosine.
- *
- * bit vectors only support l2_norm as their
- * similarity metric.
- *
- * This parameter can only be specified when index is
- * true.
- *

- * API name: {@code similarity} - */ - public final Builder similarity(@Nullable DenseVectorSimilarity value) { - this.similarity = value; - return this; - } - @Override protected Builder self() { return this; @@ -341,11 +270,11 @@ public DenseVectorProperty build() { protected static void setupDenseVectorPropertyDeserializer(ObjectDeserializer op) { PropertyBase.setupPropertyBaseDeserializer(op); + op.add(Builder::elementType, JsonpDeserializer.stringDeserializer(), "element_type"); op.add(Builder::dims, JsonpDeserializer.integerDeserializer(), "dims"); - op.add(Builder::elementType, DenseVectorElementType._DESERIALIZER, "element_type"); + op.add(Builder::similarity, JsonpDeserializer.stringDeserializer(), "similarity"); op.add(Builder::index, JsonpDeserializer.booleanDeserializer(), "index"); op.add(Builder::indexOptions, DenseVectorIndexOptions._DESERIALIZER, "index_options"); - op.add(Builder::similarity, DenseVectorSimilarity._DESERIALIZER, "similarity"); op.ignore("type"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorSimilarity.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorSimilarity.java deleted file mode 100644 index e1d7f162d..000000000 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorSimilarity.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package co.elastic.clients.elasticsearch._types.mapping; - -import co.elastic.clients.json.JsonEnum; -import co.elastic.clients.json.JsonpDeserializable; -import co.elastic.clients.json.JsonpDeserializer; - -//---------------------------------------------------------------- -// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. -//---------------------------------------------------------------- -// -// This code is generated from the Elasticsearch API specification -// at https://github.com/elastic/elasticsearch-specification -// -// Manual updates to this file will be lost when the code is -// re-generated. -// -// If you find a property that is missing or wrongly typed, please -// open an issue or a PR on the API specification repository. -// -//---------------------------------------------------------------- - -/** - * - * @see API - * specification - */ -@JsonpDeserializable -public enum DenseVectorSimilarity implements JsonEnum { - /** - * Computes the cosine similarity. During indexing Elasticsearch automatically - * normalizes vectors with cosine similarity to unit length. This - * allows to internally use dot_product for computing similarity, - * which is more efficient. Original un-normalized vectors can be still accessed - * through scripts. - *

- * The document _score is computed as - * (1 + cosine(query, vector)) / 2. - *

- * The cosine similarity does not allow vectors with zero - * magnitude, since cosine is not defined in this case. - */ - Cosine("cosine"), - - /** - * Computes the dot product of two unit vectors. This option provides an - * optimized way to perform cosine similarity. The constraints and computed - * score are defined by element_type. - *

- * When element_type is float, all vectors must be - * unit length, including both document and query vectors. - *

- * The document _score is computed as - * (1 + dot_product(query, vector)) / 2. - *

- * When element_type is byte, all vectors must have - * the same length including both document and query vectors or results will be - * inaccurate. - *

- * The document _score is computed as - * 0.5 + (dot_product(query, vector) / (32768 * dims)) where - * dims is the number of dimensions per vector. - */ - DotProduct("dot_product"), - - /** - * Computes similarity based on the L2 distance (also known as - * Euclidean distance) between the vectors. - *

- * The document _score is computed as - * 1 / (1 + l2_norm(query, vector)^2). - *

- * For bit vectors, instead of using l2_norm, the - * hamming distance between the vectors is used. - *

- * The _score transformation is - * (numBits - hamming(a, b)) / numBits. - */ - L2Norm("l2_norm"), - - /** - * Computes the maximum inner product of two vectors. This is similar to - * dot_product, but doesn't require vectors to be normalized. This - * means that each vector’s magnitude can significantly effect the score. - *

- * The document _score is adjusted to prevent negative values. For - * max_inner_product values < 0, the - * _score is - * 1 / (1 + -1 * max_inner_product(query, vector)). For - * non-negative max_inner_product results the _score - * is calculated max_inner_product(query, vector) + 1. - */ - MaxInnerProduct("max_inner_product"), - - ; - - private final String jsonValue; - - DenseVectorSimilarity(String jsonValue) { - this.jsonValue = jsonValue; - } - - public String jsonValue() { - return this.jsonValue; - } - - public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( - DenseVectorSimilarity.values()); -} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/TermsSetQuery.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/TermsSetQuery.java index e541a91f6..29ac04f6d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/TermsSetQuery.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/query_dsl/TermsSetQuery.java @@ -19,7 +19,6 @@ package co.elastic.clients.elasticsearch._types.query_dsl; -import co.elastic.clients.elasticsearch._types.FieldValue; import co.elastic.clients.elasticsearch._types.Script; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; @@ -30,7 +29,6 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.lang.String; -import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.function.Function; @@ -73,7 +71,7 @@ public class TermsSetQuery extends QueryBase implements QueryVariant { @Nullable private final Script minimumShouldMatchScript; - private final List terms; + private final List terms; // --------------------------------------------------------------------------------------------- @@ -145,7 +143,7 @@ public final Script minimumShouldMatchScript() { *

* API name: {@code terms} */ - public final List terms() { + public final List terms() { return this.terms; } @@ -171,8 +169,8 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { if (ApiTypeHelper.isDefined(this.terms)) { generator.writeKey("terms"); generator.writeStartArray(); - for (FieldValue item0 : this.terms) { - item0.serialize(generator, mapper); + for (String item0 : this.terms) { + generator.write(item0); } generator.writeEnd(); @@ -209,7 +207,7 @@ public final Builder field(String value) { @Nullable private Script minimumShouldMatchScript; - private List terms; + private List terms; /** * Specification describing number of matching terms required to return a @@ -261,7 +259,7 @@ public final Builder minimumShouldMatchScript(Function * Adds all elements of list to terms. */ - public final Builder terms(List list) { + public final Builder terms(List list) { this.terms = _listAddAll(this.terms, list); return this; } @@ -273,90 +271,11 @@ public final Builder terms(List list) { *

* Adds one or more values to terms. */ - public final Builder terms(FieldValue value, FieldValue... values) { - this.terms = _listAdd(this.terms, value, values); - return this; - } - - /** - * Required - Array of terms you wish to find in the provided field. - *

- * API name: {@code terms} - *

- * Adds all passed values to terms. - */ public final Builder terms(String value, String... values) { - this.terms = _listAdd(this.terms, FieldValue.of(value)); - List fieldValues = new ArrayList<>(); - for (String v : values) { - fieldValues.add(FieldValue.of(v)); - } - this.terms = _listAddAll(this.terms, fieldValues); - return this; - } - - /** - * Required - Array of terms you wish to find in the provided field. - *

- * API name: {@code terms} - *

- * Adds all passed values to terms. - */ - public final Builder terms(long value, long... values) { - this.terms = _listAdd(this.terms, FieldValue.of(value)); - List fieldValues = new ArrayList<>(); - for (long v : values) { - fieldValues.add(FieldValue.of(v)); - } - this.terms = _listAddAll(this.terms, fieldValues); - return this; - } - - /** - * Required - Array of terms you wish to find in the provided field. - *

- * API name: {@code terms} - *

- * Adds all passed values to terms. - */ - public final Builder terms(double value, double... values) { - this.terms = _listAdd(this.terms, FieldValue.of(value)); - List fieldValues = new ArrayList<>(); - for (double v : values) { - fieldValues.add(FieldValue.of(v)); - } - this.terms = _listAddAll(this.terms, fieldValues); - return this; - } - - /** - * Required - Array of terms you wish to find in the provided field. - *

- * API name: {@code terms} - *

- * Adds all passed values to terms. - */ - public final Builder terms(boolean value, boolean... values) { - this.terms = _listAdd(this.terms, FieldValue.of(value)); - List fieldValues = new ArrayList<>(); - for (boolean v : values) { - fieldValues.add(FieldValue.of(v)); - } - this.terms = _listAddAll(this.terms, fieldValues); + this.terms = _listAdd(this.terms, value, values); return this; } - /** - * Required - Array of terms you wish to find in the provided field. - *

- * API name: {@code terms} - *

- * Adds a value to terms using a builder lambda. - */ - public final Builder terms(Function> fn) { - return terms(fn.apply(new FieldValue.Builder()).build()); - } - @Override protected Builder self() { return this; @@ -388,7 +307,7 @@ protected static void setupTermsSetQueryDeserializer(ObjectDeserializer impleme @Nullable private Long maxConcurrentShardRequests; + @Nullable + private String minCompatibleShardNode; + @Nullable private Double minScore; @@ -1735,6 +1750,14 @@ public final Builder maxConcurrentShardRequests(@Nullable Long value) { return this; } + /** + * API name: {@code min_compatible_shard_node} + */ + public final Builder minCompatibleShardNode(@Nullable String value) { + this.minCompatibleShardNode = value; + return this; + } + /** * Minimum _score for matching documents. Documents with a lower _score are not * included in the search results. @@ -2421,6 +2444,9 @@ protected static void setupSubmitRequestDeserializer(ObjectDeserializer expandWildcards; + @Nullable + private final Boolean local; + + @Nullable + private final Time masterTimeout; + private final List name; // --------------------------------------------------------------------------------------------- @@ -78,6 +86,8 @@ public class AliasesRequest extends CatRequestBase { private AliasesRequest(Builder builder) { this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); + this.local = builder.local; + this.masterTimeout = builder.masterTimeout; this.name = ApiTypeHelper.unmodifiable(builder.name); } @@ -96,6 +106,30 @@ public final List expandWildcards() { return this.expandWildcards; } + /** + * If true, the request computes the list of selected nodes from + * the local cluster state. If false the list of selected nodes are + * computed from the cluster state of the master node. In both cases the + * coordinating node will send requests for further information to each selected + * node. + *
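// Editor's note (not part of the generated diff): terms is now a List<String> rather
// than List<FieldValue>, so the query builder takes plain strings. The field names and
// term values below are illustrative only.
Query query = Query.of(q -> q
        .termsSet(ts -> ts
                .field("programming_languages")
                .minimumShouldMatchField("required_matches")
                .terms("java", "kotlin", "scala")));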

+ * API name: {@code local} + */ + @Nullable + public final Boolean local() { + return this.local; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * A comma-separated list of aliases to retrieve. Supports wildcards * (*). To retrieve all aliases, omit this parameter or use @@ -119,6 +153,12 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private List expandWildcards; + @Nullable + private Boolean local; + + @Nullable + private Time masterTimeout; + @Nullable private List name; @@ -148,6 +188,39 @@ public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... val return this; } + /** + * If true, the request computes the list of selected nodes from + * the local cluster state. If false the list of selected nodes are + * computed from the cluster state of the master node. In both cases the + * coordinating node will send requests for further information to each selected + * node. + *

+ * API name: {@code local} + */ + public final Builder local(@Nullable Boolean value) { + this.local = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * A comma-separated list of aliases to retrieve. Supports wildcards * (*). To retrieve all aliases, omit this parameter or use @@ -257,10 +330,16 @@ public AliasesRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (ApiTypeHelper.isDefined(request.expandWildcards)) { params.put("expand_wildcards", request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); } + if (request.local != null) { + params.put("local", String.valueOf(request.local)); + } return params; }, SimpleEndpoint.emptyMap(), false, AliasesResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java index 208adcedb..ba154296b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/AllocationRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -58,10 +59,10 @@ // typedef: cat.allocation.Request /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Provides a snapshot of the number of shards allocated to each data node and + * their disk space. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. * * @see API * specification @@ -74,6 +75,9 @@ public class AllocationRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + private final List nodeId; // --------------------------------------------------------------------------------------------- @@ -82,6 +86,7 @@ private AllocationRequest(Builder builder) { this.bytes = builder.bytes; this.local = builder.local; + this.masterTimeout = builder.masterTimeout; this.nodeId = ApiTypeHelper.unmodifiable(builder.nodeId); } @@ -114,6 +119,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *
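// Editor's note (not part of the generated diff): the new local and master_timeout
// parameters added to the cat requests above are set through the request builders as
// usual. `client` is assumed to be an ElasticsearchAsyncClient instance; the alias
// pattern and timeout value are illustrative only.
client.cat().aliases(a -> a
        .name("logs-*")                    // alias pattern to list
        .local(true)                       // compute the node list from the local cluster state
        .masterTimeout(t -> t.time("30s")) // new master_timeout request parameter
);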

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * Comma-separated list of node identifiers or names used to limit the returned * information. @@ -139,6 +154,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + @Nullable private List nodeId; @@ -166,6 +184,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * Comma-separated list of node identifiers or names used to limit the returned * information. @@ -274,6 +311,9 @@ public AllocationRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.bytes != null) { params.put("bytes", request.bytes.jsonValue()); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java index ef0891a3a..556e3a40b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ComponentTemplatesRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -70,6 +71,9 @@ public class ComponentTemplatesRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + @Nullable private final String name; @@ -78,6 +82,7 @@ public class ComponentTemplatesRequest extends CatRequestBase { private ComponentTemplatesRequest(Builder builder) { this.local = builder.local; + this.masterTimeout = builder.masterTimeout; this.name = builder.name; } @@ -100,6 +105,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * The name of the component template. Accepts wildcard expressions. If omitted, * all component templates are returned. @@ -123,6 +138,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + @Nullable private String name; @@ -140,6 +158,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * The name of the component template. Accepts wildcard expressions. If omitted, * all component templates are returned. @@ -232,6 +269,9 @@ public ComponentTemplatesRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.local != null) { params.put("local", String.valueOf(request.local)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java index 6a07e9fff..c748e1270 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatAsyncClient.java @@ -128,10 +128,10 @@ public CompletableFuture aliases() { // ----- Endpoint: cat.allocation /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Provides a snapshot of the number of shards allocated to each data node and + * their disk space. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. * * @see Documentation @@ -146,10 +146,10 @@ public CompletableFuture allocation(AllocationRequest reques } /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Provides a snapshot of the number of shards allocated to each data node and + * their disk space. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. * * @param fn * a function that initializes a builder to create the @@ -165,10 +165,10 @@ public final CompletableFuture allocation( } /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Provides a snapshot of the number of shards allocated to each data node and + * their disk space. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. * * @see Documentation @@ -314,11 +314,11 @@ public CompletableFuture count() { // ----- Endpoint: cat.fielddata /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes stats API. 
+ * Returns the amount of heap memory currently used by the field data cache on + * every data node in the cluster. IMPORTANT: cat APIs are only intended for + * human consumption using the command line or Kibana console. They are not + * intended for use by applications. For application consumption, use the nodes + * stats API. * * @see Documentation @@ -333,11 +333,11 @@ public CompletableFuture fielddata(FielddataRequest request) } /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes stats API. + * Returns the amount of heap memory currently used by the field data cache on + * every data node in the cluster. IMPORTANT: cat APIs are only intended for + * human consumption using the command line or Kibana console. They are not + * intended for use by applications. For application consumption, use the nodes + * stats API. * * @param fn * a function that initializes a builder to create the @@ -353,11 +353,11 @@ public final CompletableFuture fielddata( } /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes stats API. + * Returns the amount of heap memory currently used by the field data cache on + * every data node in the cluster. IMPORTANT: cat APIs are only intended for + * human consumption using the command line or Kibana console. They are not + * intended for use by applications. For application consumption, use the nodes + * stats API. * * @see Documentation @@ -372,18 +372,18 @@ public CompletableFuture fielddata() { // ----- Endpoint: cat.health /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. 
To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation @@ -398,18 +398,18 @@ public CompletableFuture health(HealthRequest request) { } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @param fn * a function that initializes a builder to create the @@ -425,18 +425,18 @@ public final CompletableFuture health( } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. 
+ * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation @@ -570,11 +570,10 @@ public CompletableFuture indices() { // ----- Endpoint: cat.master /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Returns information about the master node, including the ID, bound IP + * address, and name. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the nodes info API. * * @see Documentation @@ -589,11 +588,10 @@ public CompletableFuture master(MasterRequest request) { } /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Returns information about the master node, including the ID, bound IP + * address, and name. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -609,11 +607,10 @@ public final CompletableFuture master( } /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Returns information about the master node, including the ID, bound IP + * address, and name. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the nodes info API. * * @see Documentation @@ -890,10 +887,10 @@ public CompletableFuture mlTrainedModels() { // ----- Endpoint: cat.nodeattrs /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. 
+ * Returns information about custom node attributes. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see Documentation @@ -908,10 +905,10 @@ public CompletableFuture nodeattrs(NodeattrsRequest request) } /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about custom node attributes. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -927,10 +924,10 @@ public final CompletableFuture nodeattrs( } /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about custom node attributes. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see Documentation @@ -945,10 +942,10 @@ public CompletableFuture nodeattrs() { // ----- Endpoint: cat.nodes /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see Documentation @@ -963,10 +960,10 @@ public CompletableFuture nodes(NodesRequest request) { } /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -982,10 +979,10 @@ public final CompletableFuture nodes( } /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about the nodes in a cluster. 
IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see Documentation @@ -1000,11 +997,10 @@ public CompletableFuture nodes() { // ----- Endpoint: cat.pending_tasks /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @see Documentation @@ -1019,11 +1015,10 @@ public CompletableFuture pendingTasks(PendingTasksRequest } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @param fn * a function that initializes a builder to create the @@ -1039,11 +1034,10 @@ public final CompletableFuture pendingTasks( } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @see Documentation @@ -1058,10 +1052,10 @@ public CompletableFuture pendingTasks() { // ----- Endpoint: cat.plugins /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation @@ -1076,10 +1070,10 @@ public CompletableFuture plugins(PluginsRequest request) { } /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. 
For application consumption, use the nodes info API. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -1095,10 +1089,10 @@ public final CompletableFuture plugins( } /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation @@ -1113,15 +1107,15 @@ public CompletableFuture plugins() { // ----- Endpoint: cat.recovery /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Returns information about ongoing and completed shard recoveries. Shard + * recovery is the process of initializing a shard copy, such as restoring a + * primary shard from a snapshot or syncing a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. For data streams, the API returns information about the + * stream’s backing indices. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the index recovery + * API. * * @see Documentation @@ -1136,15 +1130,15 @@ public CompletableFuture recovery(RecoveryRequest request) { } /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Returns information about ongoing and completed shard recoveries. Shard + * recovery is the process of initializing a shard copy, such as restoring a + * primary shard from a snapshot or syncing a replica shard from a primary + * shard. 
When a shard recovery completes, the recovered shard is available for + * search and indexing. For data streams, the API returns information about the + * stream’s backing indices. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the index recovery + * API. * * @param fn * a function that initializes a builder to create the @@ -1160,15 +1154,15 @@ public final CompletableFuture recovery( } /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Returns information about ongoing and completed shard recoveries. Shard + * recovery is the process of initializing a shard copy, such as restoring a + * primary shard from a snapshot or syncing a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. For data streams, the API returns information about the + * stream’s backing indices. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the index recovery + * API. * * @see Documentation @@ -1183,29 +1177,66 @@ public CompletableFuture recovery() { // ----- Endpoint: cat.repositories /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture repositories(RepositoriesRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) RepositoriesRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. + * + * @param fn + * a function that initializes a builder to create the + * {@link RepositoriesRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture repositories( + Function> fn) { + return repositories(fn.apply(new RepositoriesRequest.Builder()).build()); + } + + /** + * Returns the snapshot repositories for a cluster. 
IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. * * @see Documentation * on elastic.co */ + public CompletableFuture repositories() { - return this.transport.performRequestAsync(RepositoriesRequest._INSTANCE, RepositoriesRequest._ENDPOINT, - this.transportOptions); + return this.transport.performRequestAsync(new RepositoriesRequest.Builder().build(), + RepositoriesRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: cat.segments /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Returns low-level information about the Lucene segments in index shards. For + * data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation @@ -1220,11 +1251,11 @@ public CompletableFuture segments(SegmentsRequest request) { } /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Returns low-level information about the Lucene segments in index shards. For + * data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @param fn * a function that initializes a builder to create the @@ -1240,11 +1271,11 @@ public final CompletableFuture segments( } /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Returns low-level information about the Lucene segments in index shards. For + * data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation @@ -1259,10 +1290,10 @@ public CompletableFuture segments() { // ----- Endpoint: cat.shards /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. 
They are not intended for use by applications. + * Returns information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation @@ -1277,10 +1308,10 @@ public CompletableFuture shards(ShardsRequest request) { } /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Returns information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @param fn * a function that initializes a builder to create the @@ -1296,10 +1327,10 @@ public final CompletableFuture shards( } /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Returns information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation @@ -1314,12 +1345,11 @@ public CompletableFuture shards() { // ----- Endpoint: cat.snapshots /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Returns information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation @@ -1334,12 +1364,11 @@ public CompletableFuture snapshots(SnapshotsRequest request) } /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Returns information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. 
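A minimal usage sketch of the async snapshots endpoint documented above, for readers skimming this hunk: the ElasticsearchAsyncClient type and its cat() accessor are assumed from the wider java-client (only the cat client itself appears in this diff), and transport setup is omitted.

import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
import java.util.concurrent.CompletableFuture;

public class CatSnapshotsSketch {
    // Sketch only: the async client is assumed to be built elsewhere (transport setup not shown).
    static CompletableFuture<Void> printSnapshots(ElasticsearchAsyncClient client) {
        return client.cat()
                .snapshots()                        // no-argument overload, as listed in this diff
                .thenAccept(response -> System.out.println(response));
    }
}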
* * @param fn * a function that initializes a builder to create the @@ -1355,12 +1384,11 @@ public final CompletableFuture snapshots( } /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Returns information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation @@ -1375,10 +1403,10 @@ public CompletableFuture snapshots() { // ----- Endpoint: cat.tasks /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the task management API. * * @see Documentation @@ -1393,10 +1421,10 @@ public CompletableFuture tasks(TasksRequest request) { } /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the task management API. * * @param fn * a function that initializes a builder to create the @@ -1412,10 +1440,10 @@ public final CompletableFuture tasks( } /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the task management API. * * @see Documentation @@ -1430,12 +1458,11 @@ public CompletableFuture tasks() { // ----- Endpoint: cat.templates /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. 
For application consumption, use the get - * index template API. + * Returns information about index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation @@ -1450,12 +1477,11 @@ public CompletableFuture templates(TemplatesRequest request) } /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Returns information about index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @param fn * a function that initializes a builder to create the @@ -1471,12 +1497,11 @@ public final CompletableFuture templates( } /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Returns information about index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation @@ -1491,11 +1516,11 @@ public CompletableFuture templates() { // ----- Endpoint: cat.thread_pool /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns thread pool statistics for each node in a cluster. Returned + * information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation @@ -1510,11 +1535,11 @@ public CompletableFuture threadPool(ThreadPoolRequest reques } /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. 
IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns thread pool statistics for each node in a cluster. Returned + * information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -1530,11 +1555,11 @@ public final CompletableFuture threadPool( } /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns thread pool statistics for each node in a cluster. Returned + * information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation @@ -1549,8 +1574,7 @@ public CompletableFuture threadPool() { // ----- Endpoint: cat.transforms /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transforms. Returns configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application @@ -1569,8 +1593,7 @@ public CompletableFuture transforms(TransformsRequest reques } /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transforms. Returns configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application @@ -1590,8 +1613,7 @@ public final CompletableFuture transforms( } /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transforms. Returns configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java index 5a7d5602b..53001f7be 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ElasticsearchCatClient.java @@ -129,10 +129,10 @@ public AliasesResponse aliases() throws IOException, ElasticsearchException { // ----- Endpoint: cat.allocation /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Provides a snapshot of the number of shards allocated to each data node and + * their disk space. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. * * @see Documentation @@ -147,10 +147,10 @@ public AllocationResponse allocation(AllocationRequest request) throws IOExcepti } /** - * Get shard allocation information. Get a snapshot of the number of shards - * allocated to each data node and their disk space. IMPORTANT: cat APIs are - * only intended for human consumption using the command line or Kibana console. - * They are not intended for use by applications. + * Provides a snapshot of the number of shards allocated to each data node and + * their disk space. IMPORTANT: cat APIs are only intended for human consumption + * using the command line or Kibana console. They are not intended for use by + * applications. * * @param fn * a function that initializes a builder to create the @@ -166,10 +166,10 @@ public final AllocationResponse allocation(FunctionDocumentation @@ -317,11 +317,11 @@ public CountResponse count() throws IOException, ElasticsearchException { // ----- Endpoint: cat.fielddata /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes stats API. + * Returns the amount of heap memory currently used by the field data cache on + * every data node in the cluster. IMPORTANT: cat APIs are only intended for + * human consumption using the command line or Kibana console. They are not + * intended for use by applications. For application consumption, use the nodes + * stats API. * * @see Documentation @@ -336,11 +336,11 @@ public FielddataResponse fielddata(FielddataRequest request) throws IOException, } /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes stats API. + * Returns the amount of heap memory currently used by the field data cache on + * every data node in the cluster. 
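A blocking-client sketch of the fielddata endpoint described above; ElasticsearchClient construction and its cat() accessor are assumptions, as they are not part of this diff.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.FielddataResponse;
import java.io.IOException;

public class CatFielddataSketch {
    // Sketch only: print the per-node field data cache usage (human-oriented output;
    // applications should prefer the nodes stats API, as the Javadoc notes).
    static void printFielddata(ElasticsearchClient client) throws IOException {
        FielddataResponse response = client.cat().fielddata(); // no-argument overload shown in this diff
        System.out.println(response);
    }
}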
IMPORTANT: cat APIs are only intended for + * human consumption using the command line or Kibana console. They are not + * intended for use by applications. For application consumption, use the nodes + * stats API. * * @param fn * a function that initializes a builder to create the @@ -356,11 +356,11 @@ public final FielddataResponse fielddata(FunctionDocumentation @@ -375,18 +375,18 @@ public FielddataResponse fielddata() throws IOException, ElasticsearchException // ----- Endpoint: cat.health /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation @@ -401,18 +401,18 @@ public HealthResponse health(HealthRequest request) throws IOException, Elastics } /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. 
This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @param fn * a function that initializes a builder to create the @@ -428,18 +428,18 @@ public final HealthResponse health(FunctionHH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see Documentation @@ -573,11 +573,10 @@ public IndicesResponse indices() throws IOException, ElasticsearchException { // ----- Endpoint: cat.master /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Returns information about the master node, including the ID, bound IP + * address, and name. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the nodes info API. * * @see Documentation @@ -592,11 +591,10 @@ public MasterResponse master(MasterRequest request) throws IOException, Elastics } /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Returns information about the master node, including the ID, bound IP + * address, and name. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. 
They are not intended + * for use by applications. For application consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -612,11 +610,10 @@ public final MasterResponse master(FunctionDocumentation @@ -898,10 +895,10 @@ public MlTrainedModelsResponse mlTrainedModels() throws IOException, Elasticsear // ----- Endpoint: cat.nodeattrs /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about custom node attributes. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see Documentation @@ -916,10 +913,10 @@ public NodeattrsResponse nodeattrs(NodeattrsRequest request) throws IOException, } /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about custom node attributes. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -935,10 +932,10 @@ public final NodeattrsResponse nodeattrs(FunctionDocumentation @@ -953,10 +950,10 @@ public NodeattrsResponse nodeattrs() throws IOException, ElasticsearchException // ----- Endpoint: cat.nodes /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see Documentation @@ -971,10 +968,10 @@ public NodesResponse nodes(NodesRequest request) throws IOException, Elasticsear } /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -990,10 +987,10 @@ public final NodesResponse nodes(FunctionDocumentation @@ -1008,11 +1005,10 @@ public NodesResponse nodes() throws IOException, ElasticsearchException { // ----- Endpoint: cat.pending_tasks /** - * Get pending task information. 
Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @see Documentation @@ -1027,11 +1023,10 @@ public PendingTasksResponse pendingTasks(PendingTasksRequest request) throws IOE } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @param fn * a function that initializes a builder to create the @@ -1048,11 +1043,10 @@ public final PendingTasksResponse pendingTasks( } /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @see Documentation @@ -1067,10 +1061,10 @@ public PendingTasksResponse pendingTasks() throws IOException, ElasticsearchExce // ----- Endpoint: cat.plugins /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see Documentation @@ -1085,10 +1079,10 @@ public PluginsResponse plugins(PluginsRequest request) throws IOException, Elast } /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. 
For application + * consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -1104,10 +1098,10 @@ public final PluginsResponse plugins(FunctionDocumentation @@ -1122,15 +1116,15 @@ public PluginsResponse plugins() throws IOException, ElasticsearchException { // ----- Endpoint: cat.recovery /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Returns information about ongoing and completed shard recoveries. Shard + * recovery is the process of initializing a shard copy, such as restoring a + * primary shard from a snapshot or syncing a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. For data streams, the API returns information about the + * stream’s backing indices. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the index recovery + * API. * * @see Documentation @@ -1145,15 +1139,15 @@ public RecoveryResponse recovery(RecoveryRequest request) throws IOException, El } /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Returns information about ongoing and completed shard recoveries. Shard + * recovery is the process of initializing a shard copy, such as restoring a + * primary shard from a snapshot or syncing a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. For data streams, the API returns information about the + * stream’s backing indices. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the index recovery + * API. * * @param fn * a function that initializes a builder to create the @@ -1169,15 +1163,15 @@ public final RecoveryResponse recovery(FunctionDocumentation @@ -1192,29 +1186,67 @@ public RecoveryResponse recovery() throws IOException, ElasticsearchException { // ----- Endpoint: cat.repositories /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. 
IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. + * + * @see Documentation + * on elastic.co + */ + + public RepositoriesResponse repositories(RepositoriesRequest request) throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) RepositoriesRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. + * + * @param fn + * a function that initializes a builder to create the + * {@link RepositoriesRequest} + * @see Documentation + * on elastic.co + */ + + public final RepositoriesResponse repositories( + Function> fn) + throws IOException, ElasticsearchException { + return repositories(fn.apply(new RepositoriesRequest.Builder()).build()); + } + + /** + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. * * @see Documentation * on elastic.co */ + public RepositoriesResponse repositories() throws IOException, ElasticsearchException { - return this.transport.performRequest(RepositoriesRequest._INSTANCE, RepositoriesRequest._ENDPOINT, + return this.transport.performRequest(new RepositoriesRequest.Builder().build(), RepositoriesRequest._ENDPOINT, this.transportOptions); } // ----- Endpoint: cat.segments /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Returns low-level information about the Lucene segments in index shards. For + * data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see Documentation @@ -1229,11 +1261,11 @@ public SegmentsResponse segments(SegmentsRequest request) throws IOException, El } /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. 
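The hunk above adds request-object and builder-function overloads of repositories() to the blocking client; the sketch below shows how they might be called, again assuming an ElasticsearchClient with the usual cat() accessor, which is outside this diff.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.RepositoriesRequest;
import co.elastic.clients.elasticsearch.cat.RepositoriesResponse;
import java.io.IOException;

public class CatRepositoriesSketch {
    static void listRepositories(ElasticsearchClient client) throws IOException {
        // Explicit request object, via the overload added in this hunk.
        RepositoriesResponse byRequest =
                client.cat().repositories(new RepositoriesRequest.Builder().build());

        // Builder-function overload, also added in this hunk; the request has no required parameters.
        RepositoriesResponse byLambda = client.cat().repositories(r -> r);

        System.out.println(byRequest);
        System.out.println(byLambda);
    }
}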
+ * Returns low-level information about the Lucene segments in index shards. For + * data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @param fn * a function that initializes a builder to create the @@ -1249,11 +1281,11 @@ public final SegmentsResponse segments(FunctionDocumentation @@ -1268,10 +1300,10 @@ public SegmentsResponse segments() throws IOException, ElasticsearchException { // ----- Endpoint: cat.shards /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Returns information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see Documentation @@ -1286,10 +1318,10 @@ public ShardsResponse shards(ShardsRequest request) throws IOException, Elastics } /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Returns information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @param fn * a function that initializes a builder to create the @@ -1305,10 +1337,10 @@ public final ShardsResponse shards(FunctionDocumentation @@ -1323,12 +1355,11 @@ public ShardsResponse shards() throws IOException, ElasticsearchException { // ----- Endpoint: cat.snapshots /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Returns information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see Documentation @@ -1343,12 +1374,11 @@ public SnapshotsResponse snapshots(SnapshotsRequest request) throws IOException, } /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. 
For application consumption, use the get snapshot - * API. + * Returns information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @param fn * a function that initializes a builder to create the @@ -1364,12 +1394,11 @@ public final SnapshotsResponse snapshots(FunctionDocumentation @@ -1384,10 +1413,10 @@ public SnapshotsResponse snapshots() throws IOException, ElasticsearchException // ----- Endpoint: cat.tasks /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the task management API. * * @see Documentation @@ -1402,10 +1431,10 @@ public TasksResponse tasks(TasksRequest request) throws IOException, Elasticsear } /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the task management API. * * @param fn * a function that initializes a builder to create the @@ -1421,10 +1450,10 @@ public final TasksResponse tasks(FunctionDocumentation @@ -1439,12 +1468,11 @@ public TasksResponse tasks() throws IOException, ElasticsearchException { // ----- Endpoint: cat.templates /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Returns information about index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see Documentation @@ -1459,12 +1487,11 @@ public TemplatesResponse templates(TemplatesRequest request) throws IOException, } /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. 
IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Returns information about index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @param fn * a function that initializes a builder to create the @@ -1480,12 +1507,11 @@ public final TemplatesResponse templates(FunctionDocumentation @@ -1500,11 +1526,11 @@ public TemplatesResponse templates() throws IOException, ElasticsearchException // ----- Endpoint: cat.thread_pool /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns thread pool statistics for each node in a cluster. Returned + * information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see Documentation @@ -1519,11 +1545,11 @@ public ThreadPoolResponse threadPool(ThreadPoolRequest request) throws IOExcepti } /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns thread pool statistics for each node in a cluster. Returned + * information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @param fn * a function that initializes a builder to create the @@ -1539,11 +1565,11 @@ public final ThreadPoolResponse threadPool(FunctionDocumentation @@ -1558,8 +1584,7 @@ public ThreadPoolResponse threadPool() throws IOException, ElasticsearchExceptio // ----- Endpoint: cat.transforms /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transforms. Returns configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application @@ -1578,8 +1603,7 @@ public TransformsResponse transforms(TransformsRequest request) throws IOExcepti } /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transforms. Returns configuration and usage information about transforms. *

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application @@ -1599,8 +1623,7 @@ public final TransformsResponse transforms(Function * CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java index 7cbd6677f..ca611a8dd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/FielddataRequest.java @@ -57,11 +57,11 @@ // typedef: cat.fielddata.Request /** - * Get field data cache information. Get the amount of heap memory currently - * used by the field data cache on every data node in the cluster. IMPORTANT: - * cat APIs are only intended for human consumption using the command line or - * Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes stats API. + * Returns the amount of heap memory currently used by the field data cache on + * every data node in the cluster. IMPORTANT: cat APIs are only intended for + * human consumption using the command line or Kibana console. They are not + * intended for use by applications. For application consumption, use the nodes + * stats API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java index a5849749a..14e470a02 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HealthRequest.java @@ -55,18 +55,18 @@ // typedef: cat.health.Request /** - * Get the cluster health status. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * cluster health API. This API is often used to check malfunctioning clusters. - * To help you track cluster health alongside log files and alerting systems, - * the API returns timestamps in two formats: HH:MM:SS, which is - * human-readable but includes no date information; - * Unix epoch time, which is machine-sortable and includes date - * information. The latter format is useful for cluster recoveries that take - * multiple days. You can use the cat health API to verify cluster health across - * multiple nodes. You also can use the API to track the recovery of a large - * cluster over a longer period of time. + * Returns the health status of a cluster, similar to the cluster health API. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the cluster health API. This API is often used + * to check malfunctioning clusters. To help you track cluster health alongside + * log files and alerting systems, the API returns timestamps in two formats: + * HH:MM:SS, which is human-readable but includes no date + * information; Unix epoch time, which is machine-sortable and + * includes date information. 
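The cat health javadoc added here describes the two timestamp formats returned per row; a minimal sketch of reading those rows, assuming an already-configured ElasticsearchClient named client:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.HealthResponse;
import java.io.IOException;

public class CatHealthSketch {
    // Each returned row carries both the HH:MM:SS timestamp and the epoch column
    // mentioned in the surrounding javadoc.
    static void printHealth(ElasticsearchClient client) throws IOException {
        HealthResponse response = client.cat().health();
        response.valueBody().forEach(System.out::println);
    }
}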
The latter format is useful for cluster recoveries + * that take multiple days. You can use the cat health API to verify cluster + * health across multiple nodes. You also can use the API to track the recovery + * of a large cluster over a longer period of time. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java index 92567555f..78d0d996a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpRequest.java @@ -29,8 +29,6 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Collections; -import java.util.HashMap; -import java.util.Map; import java.util.Objects; //---------------------------------------------------------------- @@ -57,7 +55,7 @@ * specification */ -public class HelpRequest extends CatRequestBase { +public class HelpRequest { public HelpRequest() { } @@ -93,9 +91,7 @@ public HelpRequest() { // Request parameters request -> { - Map params = new HashMap<>(); - params.put("format", "json"); - return params; + return Collections.emptyMap(); }, SimpleEndpoint.emptyMap(), false, HelpResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpResponse.java index 0edc100ee..03fced1f8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/HelpResponse.java @@ -19,23 +19,13 @@ package co.elastic.clients.elasticsearch.cat; -import co.elastic.clients.elasticsearch.cat.help.HelpRecord; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; -import co.elastic.clients.json.JsonpMapper; -import co.elastic.clients.json.JsonpSerializable; -import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; -import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; -import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; -import jakarta.json.stream.JsonParser; -import java.util.List; import java.util.Objects; -import java.util.function.Function; -import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -59,120 +49,17 @@ * @see API * specification */ -@JsonpDeserializable -public class HelpResponse implements JsonpSerializable { - private final List valueBody; - // --------------------------------------------------------------------------------------------- - - private HelpResponse(Builder builder) { - - this.valueBody = ApiTypeHelper.unmodifiableRequired(builder.valueBody, this, "valueBody"); - - } - - public static HelpResponse of(Function> fn) { - return fn.apply(new Builder()).build(); - } - - /** - * Required - Response value. - */ - public final List valueBody() { - return this.valueBody; +public class HelpResponse { + public HelpResponse() { } /** - * Serialize this value to JSON. + * Singleton instance for {@link HelpResponse}. 
*/ - public void serialize(JsonGenerator generator, JsonpMapper mapper) { - generator.writeStartArray(); - for (HelpRecord item0 : this.valueBody) { - item0.serialize(generator, mapper); - - } - generator.writeEnd(); - - } + public static final HelpResponse _INSTANCE = new HelpResponse(); - @Override - public String toString() { - return JsonpUtils.toString(this); - } - - // --------------------------------------------------------------------------------------------- - - /** - * Builder for {@link HelpResponse}. - */ - - public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - private List valueBody; - - /** - * Required - Response value. - *

- * Adds all elements of list to valueBody. - */ - public final Builder valueBody(List list) { - this.valueBody = _listAddAll(this.valueBody, list); - return this; - } - - /** - * Required - Response value. - *

- * Adds one or more values to valueBody. - */ - public final Builder valueBody(HelpRecord value, HelpRecord... values) { - this.valueBody = _listAdd(this.valueBody, value, values); - return this; - } - - /** - * Required - Response value. - *

- * Adds a value to valueBody using a builder lambda. - */ - public final Builder valueBody(Function> fn) { - return valueBody(fn.apply(new HelpRecord.Builder()).build()); - } - - @Override - public Builder withJson(JsonParser parser, JsonpMapper mapper) { - - @SuppressWarnings("unchecked") - List value = (List) JsonpDeserializer.arrayDeserializer(HelpRecord._DESERIALIZER) - .deserialize(parser, mapper); - return this.valueBody(value); - } - - @Override - protected Builder self() { - return this; - } - - /** - * Builds a {@link HelpResponse}. - * - * @throws NullPointerException - * if some of the required fields are null. - */ - public HelpResponse build() { - _checkSingleUse(); - - return new HelpResponse(this); - } - } - - public static final JsonpDeserializer _DESERIALIZER = createHelpResponseDeserializer(); - protected static JsonpDeserializer createHelpResponseDeserializer() { - - JsonpDeserializer> valueDeserializer = JsonpDeserializer - .arrayDeserializer(HelpRecord._DESERIALIZER); - - return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() - .valueBody(valueDeserializer.deserialize(parser, mapper, event)).build()); - } + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer + .emptyObject(HelpResponse._INSTANCE); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java index af07f14e5..794b548b5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/IndicesRequest.java @@ -23,6 +23,7 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.ExpandWildcard; import co.elastic.clients.elasticsearch._types.HealthStatus; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; @@ -101,6 +102,9 @@ public class IndicesRequest extends CatRequestBase { private final List index; + @Nullable + private final Time masterTimeout; + @Nullable private final Boolean pri; @@ -116,6 +120,7 @@ private IndicesRequest(Builder builder) { this.health = builder.health; this.includeUnloadedSegments = builder.includeUnloadedSegments; this.index = ApiTypeHelper.unmodifiable(builder.index); + this.masterTimeout = builder.masterTimeout; this.pri = builder.pri; this.time = builder.time; @@ -177,6 +182,16 @@ public final List index() { return this.index; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * If true, the response only includes information from primary shards. *

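The new master_timeout getter above pairs with the builder methods added in the next hunk; a sketch of passing it on a cat indices call, assuming an ElasticsearchClient named client and an illustrative logs-* index pattern:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.IndicesResponse;
import java.io.IOException;

public class CatIndicesSketch {
    static void countIndices(ElasticsearchClient client) throws IOException {
        IndicesResponse response = client.cat().indices(i -> i
                .index("logs-*")
                .masterTimeout(t -> t.time("30s"))); // wait at most 30s for the master node
        System.out.println(response.valueBody().size() + " matching indices");
    }
}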
@@ -221,6 +236,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private List index; + @Nullable + private Time masterTimeout; + @Nullable private Boolean pri; @@ -311,6 +329,25 @@ public final Builder index(String value, String... values) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * If true, the response only includes information from primary shards. *

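The hunk below wires master_timeout into the query string via Time._toJsonString(), and MasterRequest further down gains the same parameter. A sketch of using it there, assuming an ElasticsearchClient named client:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.MasterResponse;
import java.io.IOException;

public class CatMasterSketch {
    static void printMaster(ElasticsearchClient client) throws IOException {
        MasterResponse response = client.cat().master(m -> m
                .local(false)                        // resolve the node list from the master's cluster state
                .masterTimeout(t -> t.time("30s"))); // sent as the master_timeout query parameter
        response.valueBody().forEach(System.out::println);
    }
}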
@@ -412,6 +449,9 @@ public IndicesRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (ApiTypeHelper.isDefined(request.expandWildcards)) { params.put("expand_wildcards", request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java index a9cc80187..d1af00023 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MasterRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -54,11 +55,10 @@ // typedef: cat.master.Request /** - * Get master node information. Get information about the master node, including - * the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the nodes - * info API. + * Returns information about the master node, including the ID, bound IP + * address, and name. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the nodes info API. * * @see API * specification @@ -68,11 +68,15 @@ public class MasterRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + // --------------------------------------------------------------------------------------------- private MasterRequest(Builder builder) { this.local = builder.local; + this.masterTimeout = builder.masterTimeout; } @@ -94,6 +98,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -106,6 +120,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + /** * If true, the request computes the list of selected nodes from * the local cluster state. If false the list of selected nodes are @@ -120,6 +137,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -167,6 +203,9 @@ public MasterRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.local != null) { params.put("local", String.valueOf(request.local)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java index c257917f4..2457f9c21 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlDataFrameAnalyticsRequest.java @@ -21,7 +21,7 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; -import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -86,7 +86,7 @@ public class MlDataFrameAnalyticsRequest extends CatRequestBase { private final List s; @Nullable - private final Time time; + private final TimeUnit time; // --------------------------------------------------------------------------------------------- @@ -161,7 +161,7 @@ public final List s() { * API name: {@code time} */ @Nullable - public final Time time() { + public final TimeUnit time() { return this.time; } @@ -190,7 +190,7 @@ public static class Builder extends CatRequestBase.AbstractBuilder private List s; @Nullable - private Time time; + private TimeUnit time; /** * Whether to ignore if a wildcard expression matches no configs. (This includes @@ -278,20 +278,11 @@ public final Builder s(CatDfaColumn value, CatDfaColumn... values) { *

* API name: {@code time} */ - public final Builder time(@Nullable Time value) { + public final Builder time(@Nullable TimeUnit value) { this.time = value; return this; } - /** - * Unit used to display time values. - *

- * API name: {@code time} - */ - public final Builder time(Function> fn) { - return this.time(fn.apply(new Time.Builder()).build()); - } - @Override protected Builder self() { return this; @@ -387,7 +378,7 @@ public MlDataFrameAnalyticsRequest build() { params.put("h", request.h.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); } if (request.time != null) { - params.put("time", request.time._toJsonString()); + params.put("time", request.time.jsonValue()); } if (request.allowNoMatch != null) { params.put("allow_no_match", String.valueOf(request.allowNoMatch)); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java index fe3bc8011..b5beb9f66 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/MlTrainedModelsRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -90,6 +91,9 @@ public class MlTrainedModelsRequest extends CatRequestBase { @Nullable private final Integer size; + @Nullable + private final TimeUnit time; + // --------------------------------------------------------------------------------------------- private MlTrainedModelsRequest(Builder builder) { @@ -101,6 +105,7 @@ private MlTrainedModelsRequest(Builder builder) { this.modelId = builder.modelId; this.s = ApiTypeHelper.unmodifiable(builder.s); this.size = builder.size; + this.time = builder.time; } @@ -182,6 +187,16 @@ public final Integer size() { return this.size; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + // --------------------------------------------------------------------------------------------- /** @@ -212,6 +227,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Integer size; + @Nullable + private TimeUnit time; + /** * Specifies what to do when the request: contains wildcard expressions and * there are no models that match; contains the _all string or no @@ -316,6 +334,16 @@ public final Builder size(@Nullable Integer value) { return this; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + @Override protected Builder self() { return this; @@ -414,6 +442,9 @@ public MlTrainedModelsRequest build() { if (request.from != null) { params.put("from", String.valueOf(request.from)); } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } if (request.allowNoMatch != null) { params.put("allow_no_match", String.valueOf(request.allowNoMatch)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java index d97911919..45ba620ce 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodeattrsRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -54,10 +55,10 @@ // typedef: cat.nodeattrs.Request /** - * Get node attribute information. Get information about custom node attributes. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about custom node attributes. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see API * specification @@ -67,11 +68,15 @@ public class NodeattrsRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + // --------------------------------------------------------------------------------------------- private NodeattrsRequest(Builder builder) { this.local = builder.local; + this.masterTimeout = builder.masterTimeout; } @@ -93,6 +98,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -105,6 +120,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + /** * If true, the request computes the list of selected nodes from * the local cluster state. If false the list of selected nodes are @@ -119,6 +137,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -166,6 +203,9 @@ public NodeattrsRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.local != null) { params.put("local", String.valueOf(request.local)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java index 8fea93dc6..e4dab0590 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/NodesRequest.java @@ -21,6 +21,8 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -55,10 +57,10 @@ // typedef: cat.nodes.Request /** - * Get node information. Get information about the nodes in a cluster. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. For - * application consumption, use the nodes info API. + * Returns information about the nodes in a cluster. IMPORTANT: cat APIs are + * only intended for human consumption using the command line or Kibana console. + * They are not intended for use by applications. For application consumption, + * use the nodes info API. * * @see API * specification @@ -74,6 +76,12 @@ public class NodesRequest extends CatRequestBase { @Nullable private final Boolean includeUnloadedSegments; + @Nullable + private final Time masterTimeout; + + @Nullable + private final TimeUnit time; + // --------------------------------------------------------------------------------------------- private NodesRequest(Builder builder) { @@ -81,6 +89,8 @@ private NodesRequest(Builder builder) { this.bytes = builder.bytes; this.fullId = builder.fullId; this.includeUnloadedSegments = builder.includeUnloadedSegments; + this.masterTimeout = builder.masterTimeout; + this.time = builder.time; } @@ -120,6 +130,26 @@ public final Boolean includeUnloadedSegments() { return this.includeUnloadedSegments; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + // --------------------------------------------------------------------------------------------- /** @@ -136,6 +166,12 @@ public static class Builder extends CatRequestBase.AbstractBuilder impl @Nullable private Boolean includeUnloadedSegments; + @Nullable + private Time masterTimeout; + + @Nullable + private TimeUnit time; + /** * The unit used to display byte values. *

@@ -168,6 +204,35 @@ public final Builder includeUnloadedSegments(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + @Override protected Builder self() { return this; @@ -215,12 +280,18 @@ public NodesRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.bytes != null) { params.put("bytes", request.bytes.jsonValue()); } if (request.includeUnloadedSegments != null) { params.put("include_unloaded_segments", String.valueOf(request.includeUnloadedSegments)); } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } if (request.fullId != null) { params.put("full_id", String.valueOf(request.fullId)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java index 3f08d5276..c16b5f0a3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PendingTasksRequest.java @@ -20,6 +20,8 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -54,11 +56,10 @@ // typedef: cat.pending_tasks.Request /** - * Get pending task information. Get information about cluster-level changes - * that have not yet taken effect. IMPORTANT: cat APIs are only intended for - * human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the - * pending cluster tasks API. + * Returns cluster-level changes that have not yet been executed. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the pending cluster tasks API. * * @see API * specification @@ -68,11 +69,19 @@ public class PendingTasksRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + + @Nullable + private final TimeUnit time; + // --------------------------------------------------------------------------------------------- private PendingTasksRequest(Builder builder) { this.local = builder.local; + this.masterTimeout = builder.masterTimeout; + this.time = builder.time; } @@ -94,6 +103,26 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + // --------------------------------------------------------------------------------------------- /** @@ -106,6 +135,12 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + + @Nullable + private TimeUnit time; + /** * If true, the request computes the list of selected nodes from * the local cluster state. If false the list of selected nodes are @@ -120,6 +155,35 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + @Override protected Builder self() { return this; @@ -167,6 +231,12 @@ public PendingTasksRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } if (request.local != null) { params.put("local", String.valueOf(request.local)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java index 1487f4cf1..478c854dd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/PluginsRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -54,24 +55,32 @@ // typedef: cat.plugins.Request /** - * Get plugin information. Get a list of plugins running on each node of a - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns a list of plugins running on each node of a cluster. IMPORTANT: cat + * APIs are only intended for human consumption using the command line or Kibana + * console. They are not intended for use by applications. For application + * consumption, use the nodes info API. * * @see API * specification */ public class PluginsRequest extends CatRequestBase { + @Nullable + private final Boolean includeBootstrap; + @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + // --------------------------------------------------------------------------------------------- private PluginsRequest(Builder builder) { + this.includeBootstrap = builder.includeBootstrap; this.local = builder.local; + this.masterTimeout = builder.masterTimeout; } @@ -79,6 +88,16 @@ public static PluginsRequest of(Function> return fn.apply(new Builder()).build(); } + /** + * Include bootstrap plugins in the response + *

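A sketch of the include_bootstrap flag being introduced here on cat plugins, together with the new master_timeout setter from the same file, assuming an ElasticsearchClient named client:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.PluginsResponse;
import java.io.IOException;

public class CatPluginsSketch {
    static void printPlugins(ElasticsearchClient client) throws IOException {
        PluginsResponse response = client.cat().plugins(p -> p
                .includeBootstrap(true)              // also list bootstrap plugins
                .masterTimeout(t -> t.time("30s")));
        response.valueBody().forEach(System.out::println);
    }
}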
+ * API name: {@code include_bootstrap} + */ + @Nullable + public final Boolean includeBootstrap() { + return this.includeBootstrap; + } + /** * If true, the request computes the list of selected nodes from * the local cluster state. If false the list of selected nodes are @@ -93,6 +112,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -102,9 +131,25 @@ public final Boolean local() { public static class Builder extends CatRequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Boolean includeBootstrap; + @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + + /** + * Include bootstrap plugins in the response + *

+ * API name: {@code include_bootstrap} + */ + public final Builder includeBootstrap(@Nullable Boolean value) { + this.includeBootstrap = value; + return this; + } + /** * If true, the request computes the list of selected nodes from * the local cluster state. If false the list of selected nodes are @@ -119,6 +164,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -166,6 +230,12 @@ public PluginsRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.includeBootstrap != null) { + params.put("include_bootstrap", String.valueOf(request.includeBootstrap)); + } if (request.local != null) { params.put("local", String.valueOf(request.local)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java index 37e01d4c0..fdd22af97 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RecoveryRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -58,15 +59,15 @@ // typedef: cat.recovery.Request /** - * Get shard recovery information. Get information about ongoing and completed - * shard recoveries. Shard recovery is the process of initializing a shard copy, - * such as restoring a primary shard from a snapshot or syncing a replica shard - * from a primary shard. When a shard recovery completes, the recovered shard is - * available for search and indexing. For data streams, the API returns - * information about the stream’s backing indices. IMPORTANT: cat APIs are only - * intended for human consumption using the command line or Kibana console. They - * are not intended for use by applications. For application consumption, use - * the index recovery API. + * Returns information about ongoing and completed shard recoveries. Shard + * recovery is the process of initializing a shard copy, such as restoring a + * primary shard from a snapshot or syncing a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. For data streams, the API returns information about the + * stream’s backing indices. IMPORTANT: cat APIs are only intended for human + * consumption using the command line or Kibana console. They are not intended + * for use by applications. For application consumption, use the index recovery + * API. * * @see API * specification @@ -84,6 +85,9 @@ public class RecoveryRequest extends CatRequestBase { private final List index; + @Nullable + private final TimeUnit time; + // --------------------------------------------------------------------------------------------- private RecoveryRequest(Builder builder) { @@ -92,6 +96,7 @@ private RecoveryRequest(Builder builder) { this.bytes = builder.bytes; this.detailed = builder.detailed; this.index = ApiTypeHelper.unmodifiable(builder.index); + this.time = builder.time; } @@ -141,6 +146,16 @@ public final List index() { return this.index; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + // --------------------------------------------------------------------------------------------- /** @@ -162,6 +177,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private List index; + @Nullable + private TimeUnit time; + /** * If true, the response only includes ongoing shard recoveries. *

@@ -221,6 +239,16 @@ public final Builder index(String value, String... values) { return this; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + @Override protected Builder self() { return this; @@ -311,6 +339,9 @@ public RecoveryRequest build() { if (request.bytes != null) { params.put("bytes", request.bytes.jsonValue()); } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } return params; }, SimpleEndpoint.emptyMap(), false, RecoveryResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java index 99f6cd8f7..207b8b7f5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/RepositoriesRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -28,10 +29,13 @@ import co.elastic.clients.transport.endpoints.SimpleEndpoint; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -51,24 +55,124 @@ // typedef: cat.repositories.Request /** - * Get snapshot repository information. Get a list of snapshot repositories for - * a cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the get snapshot repository - * API. + * Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. For application consumption, use + * the get snapshot repository API. * * @see API * specification */ public class RepositoriesRequest extends CatRequestBase { - public RepositoriesRequest() { + @Nullable + private final Boolean local; + + @Nullable + private final Time masterTimeout; + + // --------------------------------------------------------------------------------------------- + + private RepositoriesRequest(Builder builder) { + + this.local = builder.local; + this.masterTimeout = builder.masterTimeout; + + } + + public static RepositoriesRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * If true, the request computes the list of selected nodes from + * the local cluster state. If false the list of selected nodes are + * computed from the cluster state of the master node. In both cases the + * coordinating node will send requests for further information to each selected + * node. + *

+ * API name: {@code local} + */ + @Nullable + public final Boolean local() { + return this.local; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; } + // --------------------------------------------------------------------------------------------- + /** - * Singleton instance for {@link RepositoriesRequest}. + * Builder for {@link RepositoriesRequest}. */ - public static final RepositoriesRequest _INSTANCE = new RepositoriesRequest(); + + public static class Builder extends CatRequestBase.AbstractBuilder + implements + ObjectBuilder { + @Nullable + private Boolean local; + + @Nullable + private Time masterTimeout; + + /** + * If true, the request computes the list of selected nodes from + * the local cluster state. If false the list of selected nodes are + * computed from the cluster state of the master node. In both cases the + * coordinating node will send requests for further information to each selected + * node. + *

+ * API name: {@code local} + */ + public final Builder local(@Nullable Boolean value) { + this.local = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link RepositoriesRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public RepositoriesRequest build() { + _checkSingleUse(); + + return new RepositoriesRequest(this); + } + } // --------------------------------------------------------------------------------------------- @@ -99,6 +203,12 @@ public RepositoriesRequest() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.local != null) { + params.put("local", String.valueOf(request.local)); + } return params; }, SimpleEndpoint.emptyMap(), false, RepositoriesResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java index b5199379e..11e8b65b0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SegmentsRequest.java @@ -21,6 +21,7 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -58,11 +59,11 @@ // typedef: cat.segments.Request /** - * Get segment information. Get low-level information about the Lucene segments - * in index shards. For data streams, the API returns information about the - * backing indices. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the index segments API. + * Returns low-level information about the Lucene segments in index shards. For + * data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the index segments API. * * @see API * specification @@ -77,6 +78,9 @@ public class SegmentsRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + // --------------------------------------------------------------------------------------------- private SegmentsRequest(Builder builder) { @@ -84,6 +88,7 @@ private SegmentsRequest(Builder builder) { this.bytes = builder.bytes; this.index = ApiTypeHelper.unmodifiable(builder.index); this.local = builder.local; + this.masterTimeout = builder.masterTimeout; } @@ -126,6 +131,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + // --------------------------------------------------------------------------------------------- /** @@ -144,6 +159,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + /** * The unit used to display byte values. *

@@ -196,6 +214,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -277,6 +314,9 @@ public SegmentsRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.bytes != null) { params.put("bytes", request.bytes.jsonValue()); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java index 8ce91892d..3ed656e14 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ShardsRequest.java @@ -21,6 +21,8 @@ import co.elastic.clients.elasticsearch._types.Bytes; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -57,10 +59,10 @@ // typedef: cat.shards.Request /** - * Get shard information. Get information about the shards in a cluster. For - * data streams, the API returns information about the backing indices. - * IMPORTANT: cat APIs are only intended for human consumption using the command - * line or Kibana console. They are not intended for use by applications. + * Returns information about the shards in a cluster. For data streams, the API + * returns information about the backing indices. IMPORTANT: cat APIs are only + * intended for human consumption using the command line or Kibana console. They + * are not intended for use by applications. * * @see API * specification @@ -72,12 +74,20 @@ public class ShardsRequest extends CatRequestBase { private final List index; + @Nullable + private final Time masterTimeout; + + @Nullable + private final TimeUnit time; + // --------------------------------------------------------------------------------------------- private ShardsRequest(Builder builder) { this.bytes = builder.bytes; this.index = ApiTypeHelper.unmodifiable(builder.index); + this.masterTimeout = builder.masterTimeout; + this.time = builder.time; } @@ -106,6 +116,26 @@ public final List index() { return this.index; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + // --------------------------------------------------------------------------------------------- /** @@ -121,6 +151,12 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private List index; + @Nullable + private Time masterTimeout; + + @Nullable + private TimeUnit time; + /** * The unit used to display byte values. *

@@ -159,6 +195,35 @@ public final Builder index(String value, String... values) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + @Override protected Builder self() { return this; @@ -240,9 +305,15 @@ public ShardsRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.bytes != null) { params.put("bytes", request.bytes.jsonValue()); } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } return params; }, SimpleEndpoint.emptyMap(), false, ShardsResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java index 1e30a07c4..95749e3f2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/SnapshotsRequest.java @@ -20,6 +20,8 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -57,12 +59,11 @@ // typedef: cat.snapshots.Request /** - * Get snapshot information Get information about the snapshots stored in one or - * more repositories. A snapshot is a backup of an index or running - * Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human - * consumption using the command line or Kibana console. They are not intended - * for use by applications. For application consumption, use the get snapshot - * API. + * Returns information about the snapshots stored in one or more repositories. A + * snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: + * cat APIs are only intended for human consumption using the command line or + * Kibana console. They are not intended for use by applications. For + * application consumption, use the get snapshot API. * * @see API * specification @@ -72,14 +73,22 @@ public class SnapshotsRequest extends CatRequestBase { @Nullable private final Boolean ignoreUnavailable; + @Nullable + private final Time masterTimeout; + private final List repository; + @Nullable + private final TimeUnit time; + // --------------------------------------------------------------------------------------------- private SnapshotsRequest(Builder builder) { this.ignoreUnavailable = builder.ignoreUnavailable; + this.masterTimeout = builder.masterTimeout; this.repository = ApiTypeHelper.unmodifiable(builder.repository); + this.time = builder.time; } @@ -98,6 +107,16 @@ public final Boolean ignoreUnavailable() { return this.ignoreUnavailable; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * A comma-separated list of snapshot repositories used to limit the request. * Accepts wildcard expressions. _all returns all repositories. If @@ -109,6 +128,16 @@ public final List repository() { return this.repository; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + // --------------------------------------------------------------------------------------------- /** @@ -121,9 +150,15 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean ignoreUnavailable; + @Nullable + private Time masterTimeout; + @Nullable private List repository; + @Nullable + private TimeUnit time; + /** * If true, the response does not include information from * unavailable snapshots. @@ -135,6 +170,25 @@ public final Builder ignoreUnavailable(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function<Time.Builder, ObjectBuilder<Time>> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * A comma-separated list of snapshot repositories used to limit the request. * Accepts wildcard expressions. _all returns all repositories. If @@ -163,6 +217,16 @@ public final Builder repository(String value, String... values) { return this; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + @Override protected Builder self() { return this; @@ -245,9 +309,15 @@ public SnapshotsRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.ignoreUnavailable != null) { params.put("ignore_unavailable", String.valueOf(request.ignoreUnavailable)); } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } return params; }, SimpleEndpoint.emptyMap(), false, SnapshotsResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java index 7abeec6e1..592b5fcd4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TasksRequest.java @@ -20,6 +20,8 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -58,10 +60,10 @@ // typedef: cat.tasks.Request /** - * Get task information. Get information about tasks currently running in the - * cluster. IMPORTANT: cat APIs are only intended for human consumption using - * the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the task management API. + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the task management API. * * @see API * specification @@ -73,19 +75,31 @@ public class TasksRequest extends CatRequestBase { @Nullable private final Boolean detailed; - private final List nodeId; + private final List nodes; @Nullable private final String parentTaskId; + @Nullable + private final TimeUnit time; + + @Nullable + private final Time timeout; + + @Nullable + private final Boolean waitForCompletion; + // --------------------------------------------------------------------------------------------- private TasksRequest(Builder builder) { this.actions = ApiTypeHelper.unmodifiable(builder.actions); this.detailed = builder.detailed; - this.nodeId = ApiTypeHelper.unmodifiable(builder.nodeId); + this.nodes = ApiTypeHelper.unmodifiable(builder.nodes); this.parentTaskId = builder.parentTaskId; + this.time = builder.time; + this.timeout = builder.timeout; + this.waitForCompletion = builder.waitForCompletion; } @@ -116,10 +130,10 @@ public final Boolean detailed() { /** * Unique node identifiers, which are used to limit the response. *
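The cat snapshots request above gains the same pair of parameters; a hedged sketch using the lambda overload of masterTimeout (the repository name is a placeholder):

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.SnapshotsResponse;
import java.io.IOException;

public class CatSnapshotsSketch {
    // Illustrative only: list snapshots from one repository, skipping unavailable ones.
    static SnapshotsResponse catSnapshots(ElasticsearchClient client) throws IOException {
        return client.cat().snapshots(s -> s
                .repository("my-repository")         // hypothetical repository name
                .ignoreUnavailable(true)
                .masterTimeout(m -> m.time("30s"))); // Function<Time.Builder, ObjectBuilder<Time>> overload
    }
}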

- * API name: {@code node_id} + * API name: {@code nodes} */ - public final List<String> nodeId() { - return this.nodeId; + public final List<String> nodes() { + return this.nodes; } /** @@ -132,6 +146,37 @@ public final String parentTaskId() { return this.parentTaskId; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + @Nullable + public final TimeUnit time() { + return this.time; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

+ * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + /** + * If true, the request blocks until the task has completed. + *

+ * API name: {@code wait_for_completion} + */ + @Nullable + public final Boolean waitForCompletion() { + return this.waitForCompletion; + } + // --------------------------------------------------------------------------------------------- /** @@ -146,11 +191,20 @@ public static class Builder extends CatRequestBase.AbstractBuilder impl private Boolean detailed; @Nullable - private List nodeId; + private List nodes; @Nullable private String parentTaskId; + @Nullable + private TimeUnit time; + + @Nullable + private Time timeout; + + @Nullable + private Boolean waitForCompletion; + /** * The task action names, which are used to limit the response. *

@@ -189,24 +243,24 @@ public final Builder detailed(@Nullable Boolean value) { /** * Unique node identifiers, which are used to limit the response. *

- * API name: {@code node_id} + * API name: {@code nodes} *

- * Adds all elements of list to nodeId. + * Adds all elements of list to nodes. */ - public final Builder nodeId(List<String> list) { - this.nodeId = _listAddAll(this.nodeId, list); + public final Builder nodes(List<String> list) { + this.nodes = _listAddAll(this.nodes, list); return this; } /** * Unique node identifiers, which are used to limit the response. *

- * API name: {@code node_id} + * API name: {@code nodes} *

- * Adds one or more values to nodeId. + * Adds one or more values to nodes. */ - public final Builder nodeId(String value, String... values) { - this.nodeId = _listAdd(this.nodeId, value, values); + public final Builder nodes(String value, String... values) { + this.nodes = _listAdd(this.nodes, value, values); return this; } @@ -220,6 +274,47 @@ public final Builder parentTaskId(@Nullable String value) { return this; } + /** + * Unit used to display time values. + *

+ * API name: {@code time} + */ + public final Builder time(@Nullable TimeUnit value) { + this.time = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

+ * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

+ * API name: {@code timeout} + */ + public final Builder timeout(Function<Time.Builder, ObjectBuilder<Time>> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + /** + * If true, the request blocks until the task has completed. + *

+ * API name: {@code wait_for_completion} + */ + public final Builder waitForCompletion(@Nullable Boolean value) { + this.waitForCompletion = value; + return this; + } + @Override protected Builder self() { return this; @@ -267,17 +362,26 @@ public TasksRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (ApiTypeHelper.isDefined(request.nodes)) { + params.put("nodes", request.nodes.stream().map(v -> v).collect(Collectors.joining(","))); + } if (request.parentTaskId != null) { params.put("parent_task_id", request.parentTaskId); } if (request.detailed != null) { params.put("detailed", String.valueOf(request.detailed)); } + if (request.time != null) { + params.put("time", request.time.jsonValue()); + } if (ApiTypeHelper.isDefined(request.actions)) { params.put("actions", request.actions.stream().map(v -> v).collect(Collectors.joining(","))); } - if (ApiTypeHelper.isDefined(request.nodeId)) { - params.put("node_id", request.nodeId.stream().map(v -> v).collect(Collectors.joining(","))); + if (request.waitForCompletion != null) { + params.put("wait_for_completion", String.valueOf(request.waitForCompletion)); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); } return params; diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java index 150e9d865..229396232 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TemplatesRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -54,12 +55,11 @@ // typedef: cat.templates.Request /** - * Get index template information. Get information about the index templates in - * a cluster. You can use index templates to apply index settings and field - * mappings to new indices at creation. IMPORTANT: cat APIs are only intended - * for human consumption using the command line or Kibana console. They are not - * intended for use by applications. For application consumption, use the get - * index template API. + * Returns information about index templates in a cluster. You can use index + * templates to apply index settings and field mappings to new indices at + * creation. IMPORTANT: cat APIs are only intended for human consumption using + * the command line or Kibana console. They are not intended for use by + * applications. For application consumption, use the get index template API. * * @see API * specification @@ -69,6 +69,9 @@ public class TemplatesRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + @Nullable private final String name; @@ -77,6 +80,7 @@ public class TemplatesRequest extends CatRequestBase { private TemplatesRequest(Builder builder) { this.local = builder.local; + this.masterTimeout = builder.masterTimeout; this.name = builder.name; } @@ -99,6 +103,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *
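For the cat tasks changes above, the main caller-visible difference is the rename of nodeId to nodes plus the new time, timeout and wait_for_completion knobs; a hedged sketch (the node ids, the action filter and the TimeUnit constant name are assumptions):

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch._types.TimeUnit;
import co.elastic.clients.elasticsearch.cat.TasksResponse;
import java.io.IOException;

public class CatTasksSketch {
    // Illustrative only: note the builder method is now nodes(...) rather than nodeId(...).
    static TasksResponse catTasks(ElasticsearchClient client) throws IOException {
        return client.cat().tasks(t -> t
                .nodes("node-1", "node-2")          // hypothetical node ids
                .actions("indices:data/write/bulk") // hypothetical action filter
                .waitForCompletion(false)
                .timeout(x -> x.time("30s"))
                .time(TimeUnit.Seconds));           // constant name assumed
    }
}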

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * The name of the template to return. Accepts wildcard expressions. If omitted, * all templates are returned. @@ -122,6 +136,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + @Nullable private String name; @@ -139,6 +156,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * The name of the template to return. Accepts wildcard expressions. If omitted, * all templates are returned. @@ -231,6 +267,9 @@ public TemplatesRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.local != null) { params.put("local", String.valueOf(request.local)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java index b9b79ea4f..c4f0fdd6b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/ThreadPoolRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.cat; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.elasticsearch._types.TimeUnit; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; @@ -58,11 +59,11 @@ // typedef: cat.thread_pool.Request /** - * Get thread pool statistics. Get thread pool statistics for each node in a - * cluster. Returned information includes all built-in thread pools and custom - * thread pools. IMPORTANT: cat APIs are only intended for human consumption - * using the command line or Kibana console. They are not intended for use by - * applications. For application consumption, use the nodes info API. + * Returns thread pool statistics for each node in a cluster. Returned + * information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command + * line or Kibana console. They are not intended for use by applications. For + * application consumption, use the nodes info API. * * @see API * specification @@ -72,6 +73,9 @@ public class ThreadPoolRequest extends CatRequestBase { @Nullable private final Boolean local; + @Nullable + private final Time masterTimeout; + private final List threadPoolPatterns; @Nullable @@ -82,6 +86,7 @@ public class ThreadPoolRequest extends CatRequestBase { private ThreadPoolRequest(Builder builder) { this.local = builder.local; + this.masterTimeout = builder.masterTimeout; this.threadPoolPatterns = ApiTypeHelper.unmodifiable(builder.threadPoolPatterns); this.time = builder.time; @@ -105,6 +110,16 @@ public final Boolean local() { return this.local; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + /** * A comma-separated list of thread pool names used to limit the request. * Accepts wildcard expressions. @@ -137,6 +152,9 @@ public static class Builder extends CatRequestBase.AbstractBuilder @Nullable private Boolean local; + @Nullable + private Time masterTimeout; + @Nullable private List threadPoolPatterns; @@ -157,6 +175,25 @@ public final Builder local(@Nullable Boolean value) { return this; } + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + /** * A comma-separated list of thread pool names used to limit the request. * Accepts wildcard expressions. @@ -276,6 +313,9 @@ public ThreadPoolRequest build() { request -> { Map params = new HashMap<>(); params.put("format", "json"); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } if (request.time != null) { params.put("time", request.time.jsonValue()); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java index c8a3bd62f..095b25d87 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/TransformsRequest.java @@ -59,8 +59,7 @@ // typedef: cat.transforms.Request /** - * Get transform information. Get configuration and usage information about - * transforms. + * Get transforms. Returns configuration and usage information about transforms. *
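cat templates and cat thread_pool pick up the identical master_timeout plumbing above; a hedged sketch of the thread_pool variant (the pool name pattern is a placeholder):

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.cat.ThreadPoolResponse;
import java.io.IOException;

public class CatThreadPoolSketch {
    // Illustrative only: thread pool statistics filtered by a wildcard pattern,
    // with the new master_timeout bound alongside the pre-existing time parameter.
    static ThreadPoolResponse catThreadPool(ElasticsearchClient client) throws IOException {
        return client.cat().threadPool(t -> t
                .threadPoolPatterns("write*")       // hypothetical pattern
                .masterTimeout(m -> m.time("10s")));
    }
}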

* CAT APIs are only intended for human consumption using the Kibana console or * command line. They are not intended for use by applications. For application diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java index 7bd6ab476..b66a8257a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java @@ -23,7 +23,6 @@ import co.elastic.clients.elasticsearch._types.RequestBase; import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.elasticsearch._types.WaitForActiveShards; -import co.elastic.clients.elasticsearch.indices.IndexSettings; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -35,7 +34,6 @@ import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; -import java.lang.Integer; import java.lang.Long; import java.lang.String; import java.util.HashMap; @@ -72,21 +70,19 @@ */ @JsonpDeserializable public class FollowRequest extends RequestBase implements JsonpSerializable { - @Nullable - private final String dataStreamName; - private final String index; + @Nullable private final String leaderIndex; @Nullable private final Long maxOutstandingReadRequests; @Nullable - private final Integer maxOutstandingWriteRequests; + private final Long maxOutstandingWriteRequests; @Nullable - private final Integer maxReadRequestOperationCount; + private final Long maxReadRequestOperationCount; @Nullable private final String maxReadRequestSize; @@ -95,13 +91,13 @@ public class FollowRequest extends RequestBase implements JsonpSerializable { private final Time maxRetryDelay; @Nullable - private final Integer maxWriteBufferCount; + private final Long maxWriteBufferCount; @Nullable private final String maxWriteBufferSize; @Nullable - private final Integer maxWriteRequestOperationCount; + private final Long maxWriteRequestOperationCount; @Nullable private final String maxWriteRequestSize; @@ -109,10 +105,8 @@ public class FollowRequest extends RequestBase implements JsonpSerializable { @Nullable private final Time readPollTimeout; - private final String remoteCluster; - @Nullable - private final IndexSettings settings; + private final String remoteCluster; @Nullable private final WaitForActiveShards waitForActiveShards; @@ -121,9 +115,8 @@ public class FollowRequest extends RequestBase implements JsonpSerializable { private FollowRequest(Builder builder) { - this.dataStreamName = builder.dataStreamName; this.index = ApiTypeHelper.requireNonNull(builder.index, this, "index"); - this.leaderIndex = ApiTypeHelper.requireNonNull(builder.leaderIndex, this, "leaderIndex"); + this.leaderIndex = builder.leaderIndex; this.maxOutstandingReadRequests = builder.maxOutstandingReadRequests; this.maxOutstandingWriteRequests = builder.maxOutstandingWriteRequests; this.maxReadRequestOperationCount = builder.maxReadRequestOperationCount; @@ -134,8 +127,7 @@ private FollowRequest(Builder builder) { this.maxWriteRequestOperationCount = builder.maxWriteRequestOperationCount; this.maxWriteRequestSize = builder.maxWriteRequestSize; this.readPollTimeout = builder.readPollTimeout; - this.remoteCluster = ApiTypeHelper.requireNonNull(builder.remoteCluster, this, "remoteCluster"); - this.settings = 
builder.settings; + this.remoteCluster = builder.remoteCluster; this.waitForActiveShards = builder.waitForActiveShards; } @@ -145,18 +137,7 @@ public static FollowRequest of(Function> f } /** - * If the leader index is part of a data stream, the name to which the local - * data stream for the followed index should be renamed. - *

- * API name: {@code data_stream_name} - */ - @Nullable - public final String dataStreamName() { - return this.dataStreamName; - } - - /** - * Required - The name of the follower index. + * Required - The name of the follower index *

* API name: {@code index} */ @@ -165,17 +146,14 @@ public final String index() { } /** - * Required - The name of the index in the leader cluster to follow. - *

* API name: {@code leader_index} */ + @Nullable public final String leaderIndex() { return this.leaderIndex; } /** - * The maximum number of outstanding reads requests from the remote cluster. - *

* API name: {@code max_outstanding_read_requests} */ @Nullable @@ -184,29 +162,22 @@ public final Long maxOutstandingReadRequests() { } /** - * The maximum number of outstanding write requests on the follower. - *

* API name: {@code max_outstanding_write_requests} */ @Nullable - public final Integer maxOutstandingWriteRequests() { + public final Long maxOutstandingWriteRequests() { return this.maxOutstandingWriteRequests; } /** - * The maximum number of operations to pull per read from the remote cluster. - *

* API name: {@code max_read_request_operation_count} */ @Nullable - public final Integer maxReadRequestOperationCount() { + public final Long maxReadRequestOperationCount() { return this.maxReadRequestOperationCount; } /** - * The maximum size in bytes of per read of a batch of operations pulled from - * the remote cluster. - *

* API name: {@code max_read_request_size} */ @Nullable @@ -215,9 +186,6 @@ public final String maxReadRequestSize() { } /** - * The maximum time to wait before retrying an operation that failed - * exceptionally. An exponential backoff strategy is employed when retrying. - *

* API name: {@code max_retry_delay} */ @Nullable @@ -226,22 +194,14 @@ public final Time maxRetryDelay() { } /** - * The maximum number of operations that can be queued for writing. When this - * limit is reached, reads from the remote cluster will be deferred until the - * number of queued operations goes below the limit. - *

* API name: {@code max_write_buffer_count} */ @Nullable - public final Integer maxWriteBufferCount() { + public final Long maxWriteBufferCount() { return this.maxWriteBufferCount; } /** - * The maximum total bytes of operations that can be queued for writing. When - * this limit is reached, reads from the remote cluster will be deferred until - * the total bytes of queued operations goes below the limit. - *

* API name: {@code max_write_buffer_size} */ @Nullable @@ -250,20 +210,14 @@ public final String maxWriteBufferSize() { } /** - * The maximum number of operations per bulk write request executed on the - * follower. - *

* API name: {@code max_write_request_operation_count} */ @Nullable - public final Integer maxWriteRequestOperationCount() { + public final Long maxWriteRequestOperationCount() { return this.maxWriteRequestOperationCount; } /** - * The maximum total bytes of operations per bulk write request executed on the - * follower. - *

* API name: {@code max_write_request_size} */ @Nullable @@ -272,12 +226,6 @@ public final String maxWriteRequestSize() { } /** - * The maximum time to wait for new operations on the remote cluster when the - * follower index is synchronized with the leader index. When the timeout has - * elapsed, the poll for operations will return to the follower so that it can - * update some statistics. Then the follower will immediately attempt to read - * from the leader again. - *

* API name: {@code read_poll_timeout} */ @Nullable @@ -286,30 +234,18 @@ public final Time readPollTimeout() { } /** - * Required - The remote cluster containing the leader index. - *

* API name: {@code remote_cluster} */ + @Nullable public final String remoteCluster() { return this.remoteCluster; } /** - * Settings to override from the leader index. - *

- * API name: {@code settings} - */ - @Nullable - public final IndexSettings settings() { - return this.settings; - } - - /** - * Specifies the number of shards to wait on being active before responding. - * This defaults to waiting on none of the shards to be active. A shard must be - * restored from the leader index before being active. Restoring a follower - * shard requires transferring all the remote Lucene segment files to the - * follower index. + * Sets the number of shard copies that must be active before returning. + * Defaults to 0. Set to all for all shard copies, otherwise set to + * any non-negative value less than or equal to the total number of copies for + * the shard (number of replicas + 1) *

* API name: {@code wait_for_active_shards} */ @@ -329,14 +265,11 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - if (this.dataStreamName != null) { - generator.writeKey("data_stream_name"); - generator.write(this.dataStreamName); + if (this.leaderIndex != null) { + generator.writeKey("leader_index"); + generator.write(this.leaderIndex); } - generator.writeKey("leader_index"); - generator.write(this.leaderIndex); - if (this.maxOutstandingReadRequests != null) { generator.writeKey("max_outstanding_read_requests"); generator.write(this.maxOutstandingReadRequests); @@ -387,12 +320,9 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.readPollTimeout.serialize(generator, mapper); } - generator.writeKey("remote_cluster"); - generator.write(this.remoteCluster); - - if (this.settings != null) { - generator.writeKey("settings"); - this.settings.serialize(generator, mapper); + if (this.remoteCluster != null) { + generator.writeKey("remote_cluster"); + generator.write(this.remoteCluster); } @@ -405,21 +335,19 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { */ public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { - @Nullable - private String dataStreamName; - private String index; + @Nullable private String leaderIndex; @Nullable private Long maxOutstandingReadRequests; @Nullable - private Integer maxOutstandingWriteRequests; + private Long maxOutstandingWriteRequests; @Nullable - private Integer maxReadRequestOperationCount; + private Long maxReadRequestOperationCount; @Nullable private String maxReadRequestSize; @@ -428,13 +356,13 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private Time maxRetryDelay; @Nullable - private Integer maxWriteBufferCount; + private Long maxWriteBufferCount; @Nullable private String maxWriteBufferSize; @Nullable - private Integer maxWriteRequestOperationCount; + private Long maxWriteRequestOperationCount; @Nullable private String maxWriteRequestSize; @@ -442,27 +370,14 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private Time readPollTimeout; - private String remoteCluster; - @Nullable - private IndexSettings settings; + private String remoteCluster; @Nullable private WaitForActiveShards waitForActiveShards; /** - * If the leader index is part of a data stream, the name to which the local - * data stream for the followed index should be renamed. - *

- * API name: {@code data_stream_name} - */ - public final Builder dataStreamName(@Nullable String value) { - this.dataStreamName = value; - return this; - } - - /** - * Required - The name of the follower index. + * Required - The name of the follower index *

* API name: {@code index} */ @@ -472,18 +387,14 @@ public final Builder index(String value) { } /** - * Required - The name of the index in the leader cluster to follow. - *

* API name: {@code leader_index} */ - public final Builder leaderIndex(String value) { + public final Builder leaderIndex(@Nullable String value) { this.leaderIndex = value; return this; } /** - * The maximum number of outstanding reads requests from the remote cluster. - *

* API name: {@code max_outstanding_read_requests} */ public final Builder maxOutstandingReadRequests(@Nullable Long value) { @@ -492,29 +403,22 @@ public final Builder maxOutstandingReadRequests(@Nullable Long value) { } /** - * The maximum number of outstanding write requests on the follower. - *

* API name: {@code max_outstanding_write_requests} */ - public final Builder maxOutstandingWriteRequests(@Nullable Integer value) { + public final Builder maxOutstandingWriteRequests(@Nullable Long value) { this.maxOutstandingWriteRequests = value; return this; } /** - * The maximum number of operations to pull per read from the remote cluster. - *

* API name: {@code max_read_request_operation_count} */ - public final Builder maxReadRequestOperationCount(@Nullable Integer value) { + public final Builder maxReadRequestOperationCount(@Nullable Long value) { this.maxReadRequestOperationCount = value; return this; } /** - * The maximum size in bytes of per read of a batch of operations pulled from - * the remote cluster. - *

* API name: {@code max_read_request_size} */ public final Builder maxReadRequestSize(@Nullable String value) { @@ -523,9 +427,6 @@ public final Builder maxReadRequestSize(@Nullable String value) { } /** - * The maximum time to wait before retrying an operation that failed - * exceptionally. An exponential backoff strategy is employed when retrying. - *

* API name: {@code max_retry_delay} */ public final Builder maxRetryDelay(@Nullable Time value) { @@ -534,9 +435,6 @@ public final Builder maxRetryDelay(@Nullable Time value) { } /** - * The maximum time to wait before retrying an operation that failed - * exceptionally. An exponential backoff strategy is employed when retrying. - *

* API name: {@code max_retry_delay} */ public final Builder maxRetryDelay(Function> fn) { @@ -544,22 +442,14 @@ public final Builder maxRetryDelay(Function> f } /** - * The maximum number of operations that can be queued for writing. When this - * limit is reached, reads from the remote cluster will be deferred until the - * number of queued operations goes below the limit. - *

* API name: {@code max_write_buffer_count} */ - public final Builder maxWriteBufferCount(@Nullable Integer value) { + public final Builder maxWriteBufferCount(@Nullable Long value) { this.maxWriteBufferCount = value; return this; } /** - * The maximum total bytes of operations that can be queued for writing. When - * this limit is reached, reads from the remote cluster will be deferred until - * the total bytes of queued operations goes below the limit. - *

* API name: {@code max_write_buffer_size} */ public final Builder maxWriteBufferSize(@Nullable String value) { @@ -568,20 +458,14 @@ public final Builder maxWriteBufferSize(@Nullable String value) { } /** - * The maximum number of operations per bulk write request executed on the - * follower. - *

* API name: {@code max_write_request_operation_count} */ - public final Builder maxWriteRequestOperationCount(@Nullable Integer value) { + public final Builder maxWriteRequestOperationCount(@Nullable Long value) { this.maxWriteRequestOperationCount = value; return this; } /** - * The maximum total bytes of operations per bulk write request executed on the - * follower. - *

* API name: {@code max_write_request_size} */ public final Builder maxWriteRequestSize(@Nullable String value) { @@ -590,12 +474,6 @@ public final Builder maxWriteRequestSize(@Nullable String value) { } /** - * The maximum time to wait for new operations on the remote cluster when the - * follower index is synchronized with the leader index. When the timeout has - * elapsed, the poll for operations will return to the follower so that it can - * update some statistics. Then the follower will immediately attempt to read - * from the leader again. - *

* API name: {@code read_poll_timeout} */ public final Builder readPollTimeout(@Nullable Time value) { @@ -604,12 +482,6 @@ public final Builder readPollTimeout(@Nullable Time value) { } /** - * The maximum time to wait for new operations on the remote cluster when the - * follower index is synchronized with the leader index. When the timeout has - * elapsed, the poll for operations will return to the follower so that it can - * update some statistics. Then the follower will immediately attempt to read - * from the leader again. - *

* API name: {@code read_poll_timeout} */ public final Builder readPollTimeout(Function> fn) { @@ -617,40 +489,18 @@ public final Builder readPollTimeout(Function> } /** - * Required - The remote cluster containing the leader index. - *

* API name: {@code remote_cluster} */ - public final Builder remoteCluster(String value) { + public final Builder remoteCluster(@Nullable String value) { this.remoteCluster = value; return this; } /** - * Settings to override from the leader index. - *

- * API name: {@code settings} - */ - public final Builder settings(@Nullable IndexSettings value) { - this.settings = value; - return this; - } - - /** - * Settings to override from the leader index. - *

- * API name: {@code settings} - */ - public final Builder settings(Function> fn) { - return this.settings(fn.apply(new IndexSettings.Builder()).build()); - } - - /** - * Specifies the number of shards to wait on being active before responding. - * This defaults to waiting on none of the shards to be active. A shard must be - * restored from the leader index before being active. Restoring a follower - * shard requires transferring all the remote Lucene segment files to the - * follower index. + * Sets the number of shard copies that must be active before returning. + * Defaults to 0. Set to all for all shard copies, otherwise set to + * any non-negative value less than or equal to the total number of copies for + * the shard (number of replicas + 1) *

* API name: {@code wait_for_active_shards} */ @@ -660,11 +510,10 @@ public final Builder waitForActiveShards(@Nullable WaitForActiveShards value) { } /** - * Specifies the number of shards to wait on being active before responding. - * This defaults to waiting on none of the shards to be active. A shard must be - * restored from the leader index before being active. Restoring a follower - * shard requires transferring all the remote Lucene segment files to the - * follower index. + * Sets the number of shard copies that must be active before returning. + * Defaults to 0. Set to all for all shard copies, otherwise set to + * any non-negative value less than or equal to the total number of copies for + * the shard (number of replicas + 1) *
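A hedged usage sketch against the FollowRequest shape produced above (the index names, cluster alias and counts are placeholders); note that the count-style limits are now Long and that leader_index / remote_cluster are no longer enforced by the builder, although the server still expects them:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.ccr.FollowResponse;
import java.io.IOException;

public class CcrFollowSketch {
    // Illustrative only: create a follower index for a leader on a remote cluster.
    static FollowResponse follow(ElasticsearchClient client) throws IOException {
        return client.ccr().follow(f -> f
                .index("follower-index")           // hypothetical follower index (still required)
                .leaderIndex("leader-index")       // hypothetical leader index
                .remoteCluster("remote-cluster")   // hypothetical remote cluster alias
                .maxOutstandingWriteRequests(9L)   // now Long rather than Integer
                .readPollTimeout(t -> t.time("1m")));
    }
}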

* API name: {@code wait_for_active_shards} */ @@ -701,24 +550,22 @@ public FollowRequest build() { protected static void setupFollowRequestDeserializer(ObjectDeserializer op) { - op.add(Builder::dataStreamName, JsonpDeserializer.stringDeserializer(), "data_stream_name"); op.add(Builder::leaderIndex, JsonpDeserializer.stringDeserializer(), "leader_index"); op.add(Builder::maxOutstandingReadRequests, JsonpDeserializer.longDeserializer(), "max_outstanding_read_requests"); - op.add(Builder::maxOutstandingWriteRequests, JsonpDeserializer.integerDeserializer(), + op.add(Builder::maxOutstandingWriteRequests, JsonpDeserializer.longDeserializer(), "max_outstanding_write_requests"); - op.add(Builder::maxReadRequestOperationCount, JsonpDeserializer.integerDeserializer(), + op.add(Builder::maxReadRequestOperationCount, JsonpDeserializer.longDeserializer(), "max_read_request_operation_count"); op.add(Builder::maxReadRequestSize, JsonpDeserializer.stringDeserializer(), "max_read_request_size"); op.add(Builder::maxRetryDelay, Time._DESERIALIZER, "max_retry_delay"); - op.add(Builder::maxWriteBufferCount, JsonpDeserializer.integerDeserializer(), "max_write_buffer_count"); + op.add(Builder::maxWriteBufferCount, JsonpDeserializer.longDeserializer(), "max_write_buffer_count"); op.add(Builder::maxWriteBufferSize, JsonpDeserializer.stringDeserializer(), "max_write_buffer_size"); - op.add(Builder::maxWriteRequestOperationCount, JsonpDeserializer.integerDeserializer(), + op.add(Builder::maxWriteRequestOperationCount, JsonpDeserializer.longDeserializer(), "max_write_request_operation_count"); op.add(Builder::maxWriteRequestSize, JsonpDeserializer.stringDeserializer(), "max_write_request_size"); op.add(Builder::readPollTimeout, Time._DESERIALIZER, "read_poll_timeout"); op.add(Builder::remoteCluster, JsonpDeserializer.stringDeserializer(), "remote_cluster"); - op.add(Builder::settings, IndexSettings._DESERIALIZER, "settings"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/follow_info/FollowerIndexParameters.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/follow_info/FollowerIndexParameters.java index ed314baf5..cd9fc3ed2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/follow_info/FollowerIndexParameters.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/follow_info/FollowerIndexParameters.java @@ -27,11 +27,11 @@ import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; import java.lang.Integer; -import java.lang.Long; import java.lang.String; import java.util.Objects; import java.util.function.Function; @@ -62,50 +62,46 @@ */ @JsonpDeserializable public class FollowerIndexParameters implements JsonpSerializable { - @Nullable - private final Long maxOutstandingReadRequests; + private final int maxOutstandingReadRequests; - @Nullable - private final Integer maxOutstandingWriteRequests; + private final int maxOutstandingWriteRequests; - @Nullable - private final Integer maxReadRequestOperationCount; + private final int maxReadRequestOperationCount; - @Nullable private final String maxReadRequestSize; - @Nullable private final Time maxRetryDelay; - @Nullable - private final Integer maxWriteBufferCount; + private final int 
maxWriteBufferCount; - @Nullable private final String maxWriteBufferSize; - @Nullable - private final Integer maxWriteRequestOperationCount; + private final int maxWriteRequestOperationCount; - @Nullable private final String maxWriteRequestSize; - @Nullable private final Time readPollTimeout; // --------------------------------------------------------------------------------------------- private FollowerIndexParameters(Builder builder) { - this.maxOutstandingReadRequests = builder.maxOutstandingReadRequests; - this.maxOutstandingWriteRequests = builder.maxOutstandingWriteRequests; - this.maxReadRequestOperationCount = builder.maxReadRequestOperationCount; - this.maxReadRequestSize = builder.maxReadRequestSize; - this.maxRetryDelay = builder.maxRetryDelay; - this.maxWriteBufferCount = builder.maxWriteBufferCount; - this.maxWriteBufferSize = builder.maxWriteBufferSize; - this.maxWriteRequestOperationCount = builder.maxWriteRequestOperationCount; - this.maxWriteRequestSize = builder.maxWriteRequestSize; - this.readPollTimeout = builder.readPollTimeout; + this.maxOutstandingReadRequests = ApiTypeHelper.requireNonNull(builder.maxOutstandingReadRequests, this, + "maxOutstandingReadRequests"); + this.maxOutstandingWriteRequests = ApiTypeHelper.requireNonNull(builder.maxOutstandingWriteRequests, this, + "maxOutstandingWriteRequests"); + this.maxReadRequestOperationCount = ApiTypeHelper.requireNonNull(builder.maxReadRequestOperationCount, this, + "maxReadRequestOperationCount"); + this.maxReadRequestSize = ApiTypeHelper.requireNonNull(builder.maxReadRequestSize, this, "maxReadRequestSize"); + this.maxRetryDelay = ApiTypeHelper.requireNonNull(builder.maxRetryDelay, this, "maxRetryDelay"); + this.maxWriteBufferCount = ApiTypeHelper.requireNonNull(builder.maxWriteBufferCount, this, + "maxWriteBufferCount"); + this.maxWriteBufferSize = ApiTypeHelper.requireNonNull(builder.maxWriteBufferSize, this, "maxWriteBufferSize"); + this.maxWriteRequestOperationCount = ApiTypeHelper.requireNonNull(builder.maxWriteRequestOperationCount, this, + "maxWriteRequestOperationCount"); + this.maxWriteRequestSize = ApiTypeHelper.requireNonNull(builder.maxWriteRequestSize, this, + "maxWriteRequestSize"); + this.readPollTimeout = ApiTypeHelper.requireNonNull(builder.readPollTimeout, this, "readPollTimeout"); } @@ -114,113 +110,71 @@ public static FollowerIndexParameters of(Function - * API name: {@code max_outstanding_read_requests} + * Required - API name: {@code max_outstanding_read_requests} */ - @Nullable - public final Long maxOutstandingReadRequests() { + public final int maxOutstandingReadRequests() { return this.maxOutstandingReadRequests; } /** - * The maximum number of outstanding write requests on the follower. - *

- * API name: {@code max_outstanding_write_requests} + * Required - API name: {@code max_outstanding_write_requests} */ - @Nullable - public final Integer maxOutstandingWriteRequests() { + public final int maxOutstandingWriteRequests() { return this.maxOutstandingWriteRequests; } /** - * The maximum number of operations to pull per read from the remote cluster. - *

- * API name: {@code max_read_request_operation_count} + * Required - API name: {@code max_read_request_operation_count} */ - @Nullable - public final Integer maxReadRequestOperationCount() { + public final int maxReadRequestOperationCount() { return this.maxReadRequestOperationCount; } /** - * The maximum size in bytes of per read of a batch of operations pulled from - * the remote cluster. - *

- * API name: {@code max_read_request_size} + * Required - API name: {@code max_read_request_size} */ - @Nullable public final String maxReadRequestSize() { return this.maxReadRequestSize; } /** - * The maximum time to wait before retrying an operation that failed - * exceptionally. An exponential backoff strategy is employed when retrying. - *

- * API name: {@code max_retry_delay} + * Required - API name: {@code max_retry_delay} */ - @Nullable public final Time maxRetryDelay() { return this.maxRetryDelay; } /** - * The maximum number of operations that can be queued for writing. When this - * limit is reached, reads from the remote cluster will be deferred until the - * number of queued operations goes below the limit. - *

- * API name: {@code max_write_buffer_count} + * Required - API name: {@code max_write_buffer_count} */ - @Nullable - public final Integer maxWriteBufferCount() { + public final int maxWriteBufferCount() { return this.maxWriteBufferCount; } /** - * The maximum total bytes of operations that can be queued for writing. When - * this limit is reached, reads from the remote cluster will be deferred until - * the total bytes of queued operations goes below the limit. - *

- * API name: {@code max_write_buffer_size} + * Required - API name: {@code max_write_buffer_size} */ - @Nullable public final String maxWriteBufferSize() { return this.maxWriteBufferSize; } /** - * The maximum number of operations per bulk write request executed on the - * follower. - *

- * API name: {@code max_write_request_operation_count} + * Required - API name: {@code max_write_request_operation_count} */ - @Nullable - public final Integer maxWriteRequestOperationCount() { + public final int maxWriteRequestOperationCount() { return this.maxWriteRequestOperationCount; } /** - * The maximum total bytes of operations per bulk write request executed on the - * follower. - *

- * API name: {@code max_write_request_size} + * Required - API name: {@code max_write_request_size} */ - @Nullable public final String maxWriteRequestSize() { return this.maxWriteRequestSize; } /** - * The maximum time to wait for new operations on the remote cluster when the - * follower index is synchronized with the leader index. When the timeout has - * elapsed, the poll for operations will return to the follower so that it can - * update some statistics. Then the follower will immediately attempt to read - * from the leader again. - *

- * API name: {@code read_poll_timeout} + * Required - API name: {@code read_poll_timeout} */ - @Nullable public final Time readPollTimeout() { return this.readPollTimeout; } @@ -236,56 +190,35 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - if (this.maxOutstandingReadRequests != null) { - generator.writeKey("max_outstanding_read_requests"); - generator.write(this.maxOutstandingReadRequests); + generator.writeKey("max_outstanding_read_requests"); + generator.write(this.maxOutstandingReadRequests); - } - if (this.maxOutstandingWriteRequests != null) { - generator.writeKey("max_outstanding_write_requests"); - generator.write(this.maxOutstandingWriteRequests); + generator.writeKey("max_outstanding_write_requests"); + generator.write(this.maxOutstandingWriteRequests); - } - if (this.maxReadRequestOperationCount != null) { - generator.writeKey("max_read_request_operation_count"); - generator.write(this.maxReadRequestOperationCount); + generator.writeKey("max_read_request_operation_count"); + generator.write(this.maxReadRequestOperationCount); - } - if (this.maxReadRequestSize != null) { - generator.writeKey("max_read_request_size"); - generator.write(this.maxReadRequestSize); + generator.writeKey("max_read_request_size"); + generator.write(this.maxReadRequestSize); - } - if (this.maxRetryDelay != null) { - generator.writeKey("max_retry_delay"); - this.maxRetryDelay.serialize(generator, mapper); + generator.writeKey("max_retry_delay"); + this.maxRetryDelay.serialize(generator, mapper); - } - if (this.maxWriteBufferCount != null) { - generator.writeKey("max_write_buffer_count"); - generator.write(this.maxWriteBufferCount); + generator.writeKey("max_write_buffer_count"); + generator.write(this.maxWriteBufferCount); - } - if (this.maxWriteBufferSize != null) { - generator.writeKey("max_write_buffer_size"); - generator.write(this.maxWriteBufferSize); + generator.writeKey("max_write_buffer_size"); + generator.write(this.maxWriteBufferSize); - } - if (this.maxWriteRequestOperationCount != null) { - generator.writeKey("max_write_request_operation_count"); - generator.write(this.maxWriteRequestOperationCount); + generator.writeKey("max_write_request_operation_count"); + generator.write(this.maxWriteRequestOperationCount); - } - if (this.maxWriteRequestSize != null) { - generator.writeKey("max_write_request_size"); - generator.write(this.maxWriteRequestSize); + generator.writeKey("max_write_request_size"); + generator.write(this.maxWriteRequestSize); - } - if (this.readPollTimeout != null) { - generator.writeKey("read_poll_timeout"); - this.readPollTimeout.serialize(generator, mapper); - - } + generator.writeKey("read_poll_timeout"); + this.readPollTimeout.serialize(generator, mapper); } @@ -303,166 +236,115 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - @Nullable - private Long maxOutstandingReadRequests; + private Integer maxOutstandingReadRequests; - @Nullable private Integer maxOutstandingWriteRequests; - @Nullable private Integer maxReadRequestOperationCount; - @Nullable private String maxReadRequestSize; - @Nullable private Time maxRetryDelay; - @Nullable private Integer maxWriteBufferCount; - @Nullable private String maxWriteBufferSize; - @Nullable private Integer maxWriteRequestOperationCount; - @Nullable private String maxWriteRequestSize; - @Nullable private Time readPollTimeout; /** - * The maximum number of 
outstanding reads requests from the remote cluster. - *

- * API name: {@code max_outstanding_read_requests} + * Required - API name: {@code max_outstanding_read_requests} */ - public final Builder maxOutstandingReadRequests(@Nullable Long value) { + public final Builder maxOutstandingReadRequests(int value) { this.maxOutstandingReadRequests = value; return this; } /** - * The maximum number of outstanding write requests on the follower. - *

- * API name: {@code max_outstanding_write_requests} + * Required - API name: {@code max_outstanding_write_requests} */ - public final Builder maxOutstandingWriteRequests(@Nullable Integer value) { + public final Builder maxOutstandingWriteRequests(int value) { this.maxOutstandingWriteRequests = value; return this; } /** - * The maximum number of operations to pull per read from the remote cluster. - *

- * API name: {@code max_read_request_operation_count} + * Required - API name: {@code max_read_request_operation_count} */ - public final Builder maxReadRequestOperationCount(@Nullable Integer value) { + public final Builder maxReadRequestOperationCount(int value) { this.maxReadRequestOperationCount = value; return this; } /** - * The maximum size in bytes of per read of a batch of operations pulled from - * the remote cluster. - *

- * API name: {@code max_read_request_size} + * Required - API name: {@code max_read_request_size} */ - public final Builder maxReadRequestSize(@Nullable String value) { + public final Builder maxReadRequestSize(String value) { this.maxReadRequestSize = value; return this; } /** - * The maximum time to wait before retrying an operation that failed - * exceptionally. An exponential backoff strategy is employed when retrying. - *

- * API name: {@code max_retry_delay} + * Required - API name: {@code max_retry_delay} */ - public final Builder maxRetryDelay(@Nullable Time value) { + public final Builder maxRetryDelay(Time value) { this.maxRetryDelay = value; return this; } /** - * The maximum time to wait before retrying an operation that failed - * exceptionally. An exponential backoff strategy is employed when retrying. - *

- * API name: {@code max_retry_delay} + * Required - API name: {@code max_retry_delay} */ public final Builder maxRetryDelay(Function> fn) { return this.maxRetryDelay(fn.apply(new Time.Builder()).build()); } /** - * The maximum number of operations that can be queued for writing. When this - * limit is reached, reads from the remote cluster will be deferred until the - * number of queued operations goes below the limit. - *

- * API name: {@code max_write_buffer_count} + * Required - API name: {@code max_write_buffer_count} */ - public final Builder maxWriteBufferCount(@Nullable Integer value) { + public final Builder maxWriteBufferCount(int value) { this.maxWriteBufferCount = value; return this; } /** - * The maximum total bytes of operations that can be queued for writing. When - * this limit is reached, reads from the remote cluster will be deferred until - * the total bytes of queued operations goes below the limit. - *

- * API name: {@code max_write_buffer_size} + * Required - API name: {@code max_write_buffer_size} */ - public final Builder maxWriteBufferSize(@Nullable String value) { + public final Builder maxWriteBufferSize(String value) { this.maxWriteBufferSize = value; return this; } /** - * The maximum number of operations per bulk write request executed on the - * follower. - *

- * API name: {@code max_write_request_operation_count} + * Required - API name: {@code max_write_request_operation_count} */ - public final Builder maxWriteRequestOperationCount(@Nullable Integer value) { + public final Builder maxWriteRequestOperationCount(int value) { this.maxWriteRequestOperationCount = value; return this; } /** - * The maximum total bytes of operations per bulk write request executed on the - * follower. - *

- * API name: {@code max_write_request_size} + * Required - API name: {@code max_write_request_size} */ - public final Builder maxWriteRequestSize(@Nullable String value) { + public final Builder maxWriteRequestSize(String value) { this.maxWriteRequestSize = value; return this; } /** - * The maximum time to wait for new operations on the remote cluster when the - * follower index is synchronized with the leader index. When the timeout has - * elapsed, the poll for operations will return to the follower so that it can - * update some statistics. Then the follower will immediately attempt to read - * from the leader again. - *

- * API name: {@code read_poll_timeout} + * Required - API name: {@code read_poll_timeout} */ - public final Builder readPollTimeout(@Nullable Time value) { + public final Builder readPollTimeout(Time value) { this.readPollTimeout = value; return this; } /** - * The maximum time to wait for new operations on the remote cluster when the - * follower index is synchronized with the leader index. When the timeout has - * elapsed, the poll for operations will return to the follower so that it can - * update some statistics. Then the follower will immediately attempt to read - * from the leader again. - *
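FollowerIndexParameters is normally only read back from a ccr.follow_info response, but after this change every field is required and the counts are primitive int, so a manually built instance must set all of them or build() will fail; the values below are placeholders, purely to illustrate the new contract:

import co.elastic.clients.elasticsearch.ccr.follow_info.FollowerIndexParameters;

public class FollowerIndexParametersSketch {
    // Illustrative only: every setter below is now mandatory.
    static FollowerIndexParameters parameters() {
        return FollowerIndexParameters.of(p -> p
                .maxOutstandingReadRequests(12)
                .maxOutstandingWriteRequests(9)
                .maxReadRequestOperationCount(5120)
                .maxReadRequestSize("32mb")
                .maxRetryDelay(t -> t.time("500ms"))
                .maxWriteBufferCount(2147483647)
                .maxWriteBufferSize("512mb")
                .maxWriteRequestOperationCount(5120)
                .maxWriteRequestSize("64mb")
                .readPollTimeout(t -> t.time("1m")));
    }
}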

- * API name: {@code read_poll_timeout} + * Required - API name: {@code read_poll_timeout} */ public final Builder readPollTimeout(Function> fn) { return this.readPollTimeout(fn.apply(new Time.Builder()).build()); @@ -497,7 +379,7 @@ public FollowerIndexParameters build() { protected static void setupFollowerIndexParametersDeserializer( ObjectDeserializer op) { - op.add(Builder::maxOutstandingReadRequests, JsonpDeserializer.longDeserializer(), + op.add(Builder::maxOutstandingReadRequests, JsonpDeserializer.integerDeserializer(), "max_outstanding_read_requests"); op.add(Builder::maxOutstandingWriteRequests, JsonpDeserializer.integerDeserializer(), "max_outstanding_write_requests"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java index 1d39e93b8..f03adcb03 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/SearchRequest.java @@ -168,6 +168,9 @@ public class SearchRequest extends RequestBase implements JsonpSerializable { @Nullable private final Long maxConcurrentShardRequests; + @Nullable + private final String minCompatibleShardNode; + @Nullable private final Double minScore; @@ -281,6 +284,7 @@ private SearchRequest(Builder builder) { this.knn = ApiTypeHelper.unmodifiable(builder.knn); this.lenient = builder.lenient; this.maxConcurrentShardRequests = builder.maxConcurrentShardRequests; + this.minCompatibleShardNode = builder.minCompatibleShardNode; this.minScore = builder.minScore; this.pit = builder.pit; this.postFilter = builder.postFilter; @@ -609,6 +613,17 @@ public final Long maxConcurrentShardRequests() { return this.maxConcurrentShardRequests; } + /** + * The minimum version of the node that can handle the request Any handling node + * with a lower version will fail the request. + *

+ * API name: {@code min_compatible_shard_node} + */ + @Nullable + public final String minCompatibleShardNode() { + return this.minCompatibleShardNode; + } + /** * Minimum _score for matching documents. Documents with a lower * _score are not included in the search results. @@ -1319,6 +1334,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private Long maxConcurrentShardRequests; + @Nullable + private String minCompatibleShardNode; + @Nullable private Double minScore; @@ -1906,6 +1924,17 @@ public final Builder maxConcurrentShardRequests(@Nullable Long value) { return this; } + /** + * The minimum version of the node that can handle the request Any handling node + * with a lower version will fail the request. + *
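A hedged sketch of the restored min_compatible_shard_node parameter on _search; the index, the version string and the Void document type are simplifications for illustration:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import java.io.IOException;

public class MinCompatibleShardNodeSketch {
    // Illustrative only: fail the search if any handling node is older than the given version.
    static SearchResponse<Void> search(ElasticsearchClient client) throws IOException {
        return client.search(s -> s
                    .index("my-index")                // hypothetical index
                    .minCompatibleShardNode("8.13.0") // hypothetical minimum node version
                    .query(q -> q.matchAll(m -> m)),
                Void.class);                          // hits are not deserialized in this sketch
    }
}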

+ * API name: {@code min_compatible_shard_node} + */ + public final Builder minCompatibleShardNode(@Nullable String value) { + this.minCompatibleShardNode = value; + return this; + } + /** * Minimum _score for matching documents. Documents with a lower * _score are not included in the search results. @@ -2744,6 +2773,9 @@ protected static void setupSearchRequestDeserializer(ObjectDeserializer implement @Nullable private ShardsCapacityIndicator shardsCapacity; - @Nullable - private FileSettingsIndicator fileSettings; - /** * API name: {@code master_is_stable} */ @@ -400,22 +380,6 @@ public final Builder shardsCapacity( return this.shardsCapacity(fn.apply(new ShardsCapacityIndicator.Builder()).build()); } - /** - * API name: {@code file_settings} - */ - public final Builder fileSettings(@Nullable FileSettingsIndicator value) { - this.fileSettings = value; - return this; - } - - /** - * API name: {@code file_settings} - */ - public final Builder fileSettings( - Function> fn) { - return this.fileSettings(fn.apply(new FileSettingsIndicator.Builder()).build()); - } - @Override protected Builder self() { return this; @@ -452,7 +416,6 @@ protected static void setupIndicatorsDeserializer(ObjectDeserializer 1) { hash = hash.substring(1); } - window.location = "https://github.com/elastic/elasticsearch-specification/tree/fd5d7cff3f7e72348ac3ebf32bfadd7be9243b86/specification/" + (paths[hash] || ""); + window.location = "https://github.com/elastic/elasticsearch-specification/tree/b8071e8a539550d8cbaee3bae954ed4cc98d8422/specification/" + (paths[hash] || ""); - Please see the Elasticsearch API specification. + Please see the Elasticsearch API specification. diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java index 603e1074b..a06bce4f4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/eql/EqlSearchRequest.java @@ -39,7 +39,6 @@ import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.lang.Boolean; -import java.lang.Integer; import java.lang.Number; import java.lang.String; import java.util.HashMap; @@ -106,9 +105,6 @@ public class EqlSearchRequest extends RequestBase implements JsonpSerializable { @Nullable private final Boolean keepOnCompletion; - @Nullable - private final Integer maxSamplesPerKey; - private final String query; @Nullable @@ -143,7 +139,6 @@ private EqlSearchRequest(Builder builder) { this.index = ApiTypeHelper.unmodifiableRequired(builder.index, this, "index"); this.keepAlive = builder.keepAlive; this.keepOnCompletion = builder.keepOnCompletion; - this.maxSamplesPerKey = builder.maxSamplesPerKey; this.query = ApiTypeHelper.requireNonNull(builder.query, this, "query"); this.resultPosition = builder.resultPosition; this.runtimeMappings = ApiTypeHelper.unmodifiable(builder.runtimeMappings); @@ -256,21 +251,6 @@ public final Boolean keepOnCompletion() { return this.keepOnCompletion; } - /** - * By default, the response of a sample query contains up to 10 - * samples, with one sample per unique set of join keys. Use the - * size parameter to get a smaller or larger set of samples. To - * retrieve more than one sample per set of join keys, use the - * max_samples_per_key parameter. Pipes are not supported for - * sample queries. - *

- * API name: {@code max_samples_per_key} - */ - @Nullable - public final Integer maxSamplesPerKey() { - return this.maxSamplesPerKey; - } - /** * Required - EQL query you wish to run. *

@@ -389,11 +369,6 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("keep_on_completion"); generator.write(this.keepOnCompletion); - } - if (this.maxSamplesPerKey != null) { - generator.writeKey("max_samples_per_key"); - generator.write(this.maxSamplesPerKey); - } generator.writeKey("query"); generator.write(this.query); @@ -477,9 +452,6 @@ public static class Builder extends RequestBase.AbstractBuilder @Nullable private Boolean keepOnCompletion; - @Nullable - private Integer maxSamplesPerKey; - private String query; @Nullable @@ -689,21 +661,6 @@ public final Builder keepOnCompletion(@Nullable Boolean value) { return this; } - /** - * By default, the response of a sample query contains up to 10 - * samples, with one sample per unique set of join keys. Use the - * size parameter to get a smaller or larger set of samples. To - * retrieve more than one sample per set of join keys, use the - * max_samples_per_key parameter. Pipes are not supported for - * sample queries. - *

- * API name: {@code max_samples_per_key} - */ - public final Builder maxSamplesPerKey(@Nullable Integer value) { - this.maxSamplesPerKey = value; - return this; - } - /** * Required - EQL query you wish to run. *
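With max_samples_per_key removed from the request and its builder, an EQL search is limited to the remaining options. A minimal sketch, assuming a placeholder index and query:

    import co.elastic.clients.elasticsearch.eql.EqlSearchRequest;

    EqlSearchRequest eql = EqlSearchRequest.of(e -> e
            .index("my-data-stream")                                 // placeholder index
            .query("process where process.name == \"regsvr32.exe\"") // placeholder EQL query
            .size(10));                                              // overall result size is still available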

@@ -833,7 +790,6 @@ protected static void setupEqlSearchRequestDeserializer(ObjectDeserializerDocumentation on elastic.co */ @@ -88,9 +88,9 @@ public CompletableFuture> search(Flee } /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @param fn * a function that initializes a builder to create the @@ -105,9 +105,9 @@ public final CompletableFuture> searc } /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @see Documentation on elastic.co */ @@ -123,9 +123,9 @@ public CompletableFuture> search(Flee } /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java index 30503174d..6cc870710 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/ElasticsearchFleetClient.java @@ -70,9 +70,9 @@ public ElasticsearchFleetClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: fleet.search /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @see Documentation on elastic.co */ @@ -88,9 +88,9 @@ public FleetSearchResponse search(FleetSearchRequest requ } /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @param fn * a function that initializes a builder to create the @@ -105,9 +105,9 @@ public final FleetSearchResponse search( } /** - * Run a Fleet search. 
The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @see Documentation on elastic.co */ @@ -123,9 +123,9 @@ public FleetSearchResponse search(FleetSearchRequest requ } /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/FleetSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/FleetSearchRequest.java index 1fd937219..898fa6d7f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/FleetSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/fleet/FleetSearchRequest.java @@ -84,9 +84,9 @@ // typedef: fleet.search.Request /** - * Run a Fleet search. The purpose of the Fleet search API is to provide an API - * where the search will be run only after the provided checkpoint has been - * processed and is visible for searches inside of Elasticsearch. + * The purpose of the fleet search api is to provide a search api where the + * search will only be executed after provided checkpoint has been processed and + * is visible for searches inside of Elasticsearch. * * @see API * specification @@ -158,6 +158,9 @@ public class FleetSearchRequest extends RequestBase implements JsonpSerializable @Nullable private final Long maxConcurrentShardRequests; + @Nullable + private final String minCompatibleShardNode; + @Nullable private final Double minScore; @@ -265,6 +268,7 @@ private FleetSearchRequest(Builder builder) { this.indicesBoost = ApiTypeHelper.unmodifiable(builder.indicesBoost); this.lenient = builder.lenient; this.maxConcurrentShardRequests = builder.maxConcurrentShardRequests; + this.minCompatibleShardNode = builder.minCompatibleShardNode; this.minScore = builder.minScore; this.pit = builder.pit; this.postFilter = builder.postFilter; @@ -516,6 +520,14 @@ public final Long maxConcurrentShardRequests() { return this.maxConcurrentShardRequests; } + /** + * API name: {@code min_compatible_shard_node} + */ + @Nullable + public final String minCompatibleShardNode() { + return this.minCompatibleShardNode; + } + /** * Minimum _score for matching documents. Documents with a lower _score are not * included in the search results. 
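To illustrate the checkpoint semantics described here, a rough sketch; the client variable, index, and checkpoint value are assumptions, and the call belongs in code that handles IOException:

    import co.elastic.clients.elasticsearch.fleet.FleetSearchResponse;

    // 'client' is an existing ElasticsearchClient; the search runs only after
    // the target index has processed checkpoint 42 (placeholder value).
    FleetSearchResponse<Void> resp = client.fleet().search(s -> s
            .index(".fleet-agents")      // placeholder index
            .waitForCheckpoints(42L)
            .query(q -> q.matchAll(m -> m)),
            Void.class);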
@@ -1120,6 +1132,9 @@ public static class Builder extends RequestBase.AbstractBuilder @Nullable private Long maxConcurrentShardRequests; + @Nullable + private String minCompatibleShardNode; + @Nullable private Double minScore; @@ -1568,6 +1583,14 @@ public final Builder maxConcurrentShardRequests(@Nullable Long value) { return this; } + /** + * API name: {@code min_compatible_shard_node} + */ + public final Builder minCompatibleShardNode(@Nullable String value) { + this.minCompatibleShardNode = value; + return this; + } + /** * Minimum _score for matching documents. Documents with a lower _score are not * included in the search results. @@ -2266,6 +2289,9 @@ protected static void setupFleetSearchRequestDeserializer(ObjectDeserializer builder) { + private DataStreamLifecycle(Builder builder) { this.dataRetention = builder.dataRetention; this.downsampling = builder.downsampling; - this.enabled = builder.enabled; } - public static DataStreamLifecycle dataStreamLifecycleOf(Function> fn) { + public static DataStreamLifecycle of(Function> fn) { return fn.apply(new Builder()).build(); } /** - * If defined, every document added to this data stream will be stored at least - * for this time frame. Any time after this duration the document could be - * deleted. When empty, every document in this data stream will be stored - * indefinitely. - *

* API name: {@code data_retention} */ @Nullable @@ -99,9 +89,6 @@ public final Time dataRetention() { } /** - * The downsampling configuration to execute for the managed backing index after - * rollover. - *

* API name: {@code downsampling} */ @Nullable @@ -109,19 +96,6 @@ public final DataStreamLifecycleDownsampling downsampling() { return this.downsampling; } - /** - * If defined, it turns data stream lifecycle on/off - * (true/false) for this data stream. A data stream - * lifecycle that's disabled (enabled: false) will have no effect - * on the data stream. - *

- * API name: {@code enabled} - */ - @Nullable - public final Boolean enabled() { - return this.enabled; - } - /** * Serialize this object to JSON. */ @@ -143,11 +117,6 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.downsampling.serialize(generator, mapper); } - if (this.enabled != null) { - generator.writeKey("enabled"); - generator.write(this.enabled); - - } } @@ -162,101 +131,62 @@ public String toString() { * Builder for {@link DataStreamLifecycle}. */ - public static class Builder extends DataStreamLifecycle.AbstractBuilder + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - @Override - protected Builder self() { - return this; - } - - /** - * Builds a {@link DataStreamLifecycle}. - * - * @throws NullPointerException - * if some of the required fields are null. - */ - public DataStreamLifecycle build() { - _checkSingleUse(); - - return new DataStreamLifecycle(this); - } - } - - public abstract static class AbstractBuilder> - extends - WithJsonObjectBuilderBase { @Nullable private Time dataRetention; @Nullable private DataStreamLifecycleDownsampling downsampling; - @Nullable - private Boolean enabled; - /** - * If defined, every document added to this data stream will be stored at least - * for this time frame. Any time after this duration the document could be - * deleted. When empty, every document in this data stream will be stored - * indefinitely. - *

* API name: {@code data_retention} */ - public final BuilderT dataRetention(@Nullable Time value) { + public final Builder dataRetention(@Nullable Time value) { this.dataRetention = value; - return self(); + return this; } /** - * If defined, every document added to this data stream will be stored at least - * for this time frame. Any time after this duration the document could be - * deleted. When empty, every document in this data stream will be stored - * indefinitely. - *

* API name: {@code data_retention} */ - public final BuilderT dataRetention(Function> fn) { + public final Builder dataRetention(Function> fn) { return this.dataRetention(fn.apply(new Time.Builder()).build()); } /** - * The downsampling configuration to execute for the managed backing index after - * rollover. - *

* API name: {@code downsampling} */ - public final BuilderT downsampling(@Nullable DataStreamLifecycleDownsampling value) { + public final Builder downsampling(@Nullable DataStreamLifecycleDownsampling value) { this.downsampling = value; - return self(); + return this; } /** - * The downsampling configuration to execute for the managed backing index after - * rollover. - *

* API name: {@code downsampling} */ - public final BuilderT downsampling( + public final Builder downsampling( Function> fn) { return this.downsampling(fn.apply(new DataStreamLifecycleDownsampling.Builder()).build()); } - /** - * If defined, it turns data stream lifecycle on/off - * (true/false) for this data stream. A data stream - * lifecycle that's disabled (enabled: false) will have no effect - * on the data stream. - *
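With the abstract builder and the enabled flag removed, and the factory renamed back to of, the type is now built directly; a minimal sketch with a placeholder retention period:

    import co.elastic.clients.elasticsearch.indices.DataStreamLifecycle;

    DataStreamLifecycle lifecycle = DataStreamLifecycle.of(l -> l
            .dataRetention(t -> t.time("30d"))); // placeholder retention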

- * API name: {@code enabled} - */ - public final BuilderT enabled(@Nullable Boolean value) { - this.enabled = value; - return self(); + @Override + protected Builder self() { + return this; } - protected abstract BuilderT self(); + /** + * Builds a {@link DataStreamLifecycle}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DataStreamLifecycle build() { + _checkSingleUse(); + return new DataStreamLifecycle(this); + } } // --------------------------------------------------------------------------------------------- @@ -267,12 +197,10 @@ public final BuilderT enabled(@Nullable Boolean value) { public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer .lazy(Builder::new, DataStreamLifecycle::setupDataStreamLifecycleDeserializer); - protected static > void setupDataStreamLifecycleDeserializer( - ObjectDeserializer op) { + protected static void setupDataStreamLifecycleDeserializer(ObjectDeserializer op) { - op.add(AbstractBuilder::dataRetention, Time._DESERIALIZER, "data_retention"); - op.add(AbstractBuilder::downsampling, DataStreamLifecycleDownsampling._DESERIALIZER, "downsampling"); - op.add(AbstractBuilder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled"); + op.add(Builder::dataRetention, Time._DESERIALIZER, "data_retention"); + op.add(Builder::downsampling, DataStreamLifecycleDownsampling._DESERIALIZER, "downsampling"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamLifecycleWithRollover.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamLifecycleWithRollover.java index b16773b7d..14e94e6e1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamLifecycleWithRollover.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DataStreamLifecycleWithRollover.java @@ -19,12 +19,16 @@ package co.elastic.clients.elasticsearch.indices; +import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; import java.util.Objects; import java.util.function.Function; @@ -56,15 +60,22 @@ * specification */ @JsonpDeserializable -public class DataStreamLifecycleWithRollover extends DataStreamLifecycle { +public class DataStreamLifecycleWithRollover implements JsonpSerializable { + @Nullable + private final Time dataRetention; + + @Nullable + private final DataStreamLifecycleDownsampling downsampling; + @Nullable private final DataStreamLifecycleRolloverConditions rollover; // --------------------------------------------------------------------------------------------- private DataStreamLifecycleWithRollover(Builder builder) { - super(builder); + this.dataRetention = builder.dataRetention; + this.downsampling = builder.downsampling; this.rollover = builder.rollover; } @@ -74,6 +85,30 @@ public static DataStreamLifecycleWithRollover of( return fn.apply(new Builder()).build(); } + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. 
Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

+ * API name: {@code data_retention} + */ + @Nullable + public final Time dataRetention() { + return this.dataRetention; + } + + /** + * The downsampling configuration to execute for the managed backing index after + * rollover. + *

+ * API name: {@code downsampling} + */ + @Nullable + public final DataStreamLifecycleDownsampling downsampling() { + return this.downsampling; + } + /** * The conditions which will trigger the rollover of a backing index as * configured by the cluster setting @@ -89,9 +124,27 @@ public final DataStreamLifecycleRolloverConditions rollover() { return this.rollover; } + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - super.serializeInternal(generator, mapper); + if (this.dataRetention != null) { + generator.writeKey("data_retention"); + this.dataRetention.serialize(generator, mapper); + + } + if (this.downsampling != null) { + generator.writeKey("downsampling"); + this.downsampling.serialize(generator, mapper); + + } if (this.rollover != null) { generator.writeKey("rollover"); this.rollover.serialize(generator, mapper); @@ -100,18 +153,76 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { } + @Override + public String toString() { + return JsonpUtils.toString(this); + } + // --------------------------------------------------------------------------------------------- /** * Builder for {@link DataStreamLifecycleWithRollover}. */ - public static class Builder extends DataStreamLifecycle.AbstractBuilder + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private Time dataRetention; + + @Nullable + private DataStreamLifecycleDownsampling downsampling; + @Nullable private DataStreamLifecycleRolloverConditions rollover; + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

+ * API name: {@code data_retention} + */ + public final Builder dataRetention(@Nullable Time value) { + this.dataRetention = value; + return this; + } + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

+ * API name: {@code data_retention} + */ + public final Builder dataRetention(Function> fn) { + return this.dataRetention(fn.apply(new Time.Builder()).build()); + } + + /** + * The downsampling configuration to execute for the managed backing index after + * rollover. + *

+ * API name: {@code downsampling} + */ + public final Builder downsampling(@Nullable DataStreamLifecycleDownsampling value) { + this.downsampling = value; + return this; + } + + /** + * The downsampling configuration to execute for the managed backing index after + * rollover. + *
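DataStreamLifecycleWithRollover now carries its own data_retention and downsampling properties instead of inheriting them. A minimal sketch:

    import co.elastic.clients.elasticsearch.indices.DataStreamLifecycleWithRollover;

    DataStreamLifecycleWithRollover lifecycle = DataStreamLifecycleWithRollover.of(l -> l
            .dataRetention(t -> t.time("90d"))); // placeholder; rollover conditions are normally filled in by the server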

+ * API name: {@code downsampling} + */ + public final Builder downsampling( + Function> fn) { + return this.downsampling(fn.apply(new DataStreamLifecycleDownsampling.Builder()).build()); + } + /** * The conditions which will trigger the rollover of a backing index as * configured by the cluster setting @@ -170,7 +281,9 @@ public DataStreamLifecycleWithRollover build() { protected static void setupDataStreamLifecycleWithRolloverDeserializer( ObjectDeserializer op) { - DataStreamLifecycle.setupDataStreamLifecycleDeserializer(op); + + op.add(Builder::dataRetention, Time._DESERIALIZER, "data_retention"); + op.add(Builder::downsampling, DataStreamLifecycleDownsampling._DESERIALIZER, "downsampling"); op.add(Builder::rollover, DataStreamLifecycleRolloverConditions._DESERIALIZER, "rollover"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java index afa873138..9a441a4f8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExistsAliasRequest.java @@ -22,7 +22,6 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.ExpandWildcard; import co.elastic.clients.elasticsearch._types.RequestBase; -import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -80,7 +79,7 @@ public class ExistsAliasRequest extends RequestBase { private final List index; @Nullable - private final Time masterTimeout; + private final Boolean local; private final List name; @@ -92,7 +91,7 @@ private ExistsAliasRequest(Builder builder) { this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); this.ignoreUnavailable = builder.ignoreUnavailable; this.index = ApiTypeHelper.unmodifiable(builder.index); - this.masterTimeout = builder.masterTimeout; + this.local = builder.local; this.name = ApiTypeHelper.unmodifiableRequired(builder.name, this, "name"); } @@ -151,14 +150,17 @@ public final List index() { } /** - * Period to wait for a connection to the master node. If no response is - * received before the timeout expires, the request fails and returns an error. + * If true, the request retrieves information from the local node + * only. *

- * API name: {@code master_timeout} + * API name: {@code local} + * + * @deprecated 8.12.0 */ + @Deprecated @Nullable - public final Time masterTimeout() { - return this.masterTimeout; + public final Boolean local() { + return this.local; } /** @@ -193,7 +195,7 @@ public static class Builder extends RequestBase.AbstractBuilder private List index; @Nullable - private Time masterTimeout; + private Boolean local; private List name; @@ -284,26 +286,19 @@ public final Builder index(String value, String... values) { } /** - * Period to wait for a connection to the master node. If no response is - * received before the timeout expires, the request fails and returns an error. + * If true, the request retrieves information from the local node + * only. *

- * API name: {@code master_timeout} + * API name: {@code local} + * + * @deprecated 8.12.0 */ - public final Builder masterTimeout(@Nullable Time value) { - this.masterTimeout = value; + @Deprecated + public final Builder local(@Nullable Boolean value) { + this.local = value; return this; } - /** - * Period to wait for a connection to the master node. If no response is - * received before the timeout expires, the request fails and returns an error. - *
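A sketch of the reworked exists-alias call, using the deprecated local flag where master_timeout used to be; the client variable and alias name are assumptions:

    import co.elastic.clients.transport.endpoints.BooleanResponse;

    // 'client' is an existing ElasticsearchClient (call site handles IOException).
    BooleanResponse exists = client.indices().existsAlias(a -> a
            .name("my-alias") // placeholder alias
            .local(true));    // deprecated since 8.12.0, as noted above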

- * API name: {@code master_timeout} - */ - public final Builder masterTimeout(Function> fn) { - return this.masterTimeout(fn.apply(new Time.Builder()).build()); - } - /** * Required - Comma-separated list of aliases to check. Supports wildcards * (*). @@ -418,9 +413,6 @@ public ExistsAliasRequest build() { // Request parameters request -> { Map params = new HashMap<>(); - if (request.masterTimeout != null) { - params.put("master_timeout", request.masterTimeout._toJsonString()); - } if (ApiTypeHelper.isDefined(request.expandWildcards)) { params.put("expand_wildcards", request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); @@ -431,6 +423,9 @@ public ExistsAliasRequest build() { if (request.allowNoIndices != null) { params.put("allow_no_indices", String.valueOf(request.allowNoIndices)); } + if (request.local != null) { + params.put("local", String.valueOf(request.local)); + } return params; }, SimpleEndpoint.emptyMap(), false, null); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java index 4e1d9c815..d8d19b1c5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/GetAliasRequest.java @@ -22,7 +22,6 @@ import co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.ExpandWildcard; import co.elastic.clients.elasticsearch._types.RequestBase; -import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.ObjectBuilderDeserializer; @@ -79,7 +78,7 @@ public class GetAliasRequest extends RequestBase { private final List index; @Nullable - private final Time masterTimeout; + private final Boolean local; private final List name; @@ -91,7 +90,7 @@ private GetAliasRequest(Builder builder) { this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); this.ignoreUnavailable = builder.ignoreUnavailable; this.index = ApiTypeHelper.unmodifiable(builder.index); - this.masterTimeout = builder.masterTimeout; + this.local = builder.local; this.name = ApiTypeHelper.unmodifiable(builder.name); } @@ -150,14 +149,17 @@ public final List index() { } /** - * Period to wait for a connection to the master node. If no response is - * received before the timeout expires, the request fails and returns an error. + * If true, the request retrieves information from the local node + * only. *

- * API name: {@code master_timeout} + * API name: {@code local} + * + * @deprecated 8.12.0 */ + @Deprecated @Nullable - public final Time masterTimeout() { - return this.masterTimeout; + public final Boolean local() { + return this.local; } /** @@ -191,7 +193,7 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private List index; @Nullable - private Time masterTimeout; + private Boolean local; @Nullable private List name; @@ -283,26 +285,19 @@ public final Builder index(String value, String... values) { } /** - * Period to wait for a connection to the master node. If no response is - * received before the timeout expires, the request fails and returns an error. + * If true, the request retrieves information from the local node + * only. *

- * API name: {@code master_timeout} + * API name: {@code local} + * + * @deprecated 8.12.0 */ - public final Builder masterTimeout(@Nullable Time value) { - this.masterTimeout = value; + @Deprecated + public final Builder local(@Nullable Boolean value) { + this.local = value; return this; } - /** - * Period to wait for a connection to the master node. If no response is - * received before the timeout expires, the request fails and returns an error. - *
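The same substitution applies to the get-alias request; roughly, with placeholder values and an assumed client variable:

    import co.elastic.clients.elasticsearch.indices.GetAliasResponse;

    GetAliasResponse aliases = client.indices().getAlias(a -> a
            .name("my-alias") // placeholder alias
            .local(true));    // deprecated since 8.12.0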

- * API name: {@code master_timeout} - */ - public final Builder masterTimeout(Function> fn) { - return this.masterTimeout(fn.apply(new Time.Builder()).build()); - } - /** * Comma-separated list of aliases to retrieve. Supports wildcards * (*). To retrieve all aliases, omit this parameter or use @@ -438,9 +433,6 @@ public GetAliasRequest build() { // Request parameters request -> { Map params = new HashMap<>(); - if (request.masterTimeout != null) { - params.put("master_timeout", request.masterTimeout._toJsonString()); - } if (ApiTypeHelper.isDefined(request.expandWildcards)) { params.put("expand_wildcards", request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); @@ -451,6 +443,9 @@ public GetAliasRequest build() { if (request.allowNoIndices != null) { params.put("allow_no_indices", String.valueOf(request.allowNoIndices)); } + if (request.local != null) { + params.put("local", String.valueOf(request.local)); + } return params; }, SimpleEndpoint.emptyMap(), false, GetAliasResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataLifecycleRequest.java index 52b6a95b6..00ad95dc1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutDataLifecycleRequest.java @@ -34,7 +34,6 @@ import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; -import jakarta.json.stream.JsonParser; import java.lang.String; import java.util.HashMap; import java.util.List; @@ -71,6 +70,12 @@ */ @JsonpDeserializable public class PutDataLifecycleRequest extends RequestBase implements JsonpSerializable { + @Nullable + private final Time dataRetention; + + @Nullable + private final DataStreamLifecycleDownsampling downsampling; + private final List expandWildcards; @Nullable @@ -81,17 +86,16 @@ public class PutDataLifecycleRequest extends RequestBase implements JsonpSeriali @Nullable private final Time timeout; - private final DataStreamLifecycle lifecycle; - // --------------------------------------------------------------------------------------------- private PutDataLifecycleRequest(Builder builder) { + this.dataRetention = builder.dataRetention; + this.downsampling = builder.downsampling; this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); this.masterTimeout = builder.masterTimeout; this.name = ApiTypeHelper.unmodifiableRequired(builder.name, this, "name"); this.timeout = builder.timeout; - this.lifecycle = ApiTypeHelper.requireNonNull(builder.lifecycle, this, "lifecycle"); } @@ -99,6 +103,31 @@ public static PutDataLifecycleRequest of(Function + * API name: {@code data_retention} + */ + @Nullable + public final Time dataRetention() { + return this.dataRetention; + } + + /** + * If defined, every backing index will execute the configured downsampling + * configuration after the backing index is not the data stream write index + * anymore. + *

+ * API name: {@code downsampling} + */ + @Nullable + public final DataStreamLifecycleDownsampling downsampling() { + return this.downsampling; + } + /** * Type of data stream that wildcard patterns can match. Supports * comma-separated values, such as open,hidden. Valid values are: @@ -145,17 +174,26 @@ public final Time timeout() { } /** - * Required - Request body. + * Serialize this object to JSON. */ - public final DataStreamLifecycle lifecycle() { - return this.lifecycle; + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); } - /** - * Serialize this value to JSON. - */ - public void serialize(JsonGenerator generator, JsonpMapper mapper) { - this.lifecycle.serialize(generator, mapper); + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.dataRetention != null) { + generator.writeKey("data_retention"); + this.dataRetention.serialize(generator, mapper); + + } + if (this.downsampling != null) { + generator.writeKey("downsampling"); + this.downsampling.serialize(generator, mapper); + + } } @@ -168,6 +206,12 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Time dataRetention; + + @Nullable + private DataStreamLifecycleDownsampling downsampling; + @Nullable private List expandWildcards; @@ -179,7 +223,54 @@ public static class Builder extends RequestBase.AbstractBuilder @Nullable private Time timeout; - private DataStreamLifecycle lifecycle; + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

+ * API name: {@code data_retention} + */ + public final Builder dataRetention(@Nullable Time value) { + this.dataRetention = value; + return this; + } + + /** + * If defined, every document added to this data stream will be stored at least + * for this time frame. Any time after this duration the document could be + * deleted. When empty, every document in this data stream will be stored + * indefinitely. + *

+ * API name: {@code data_retention} + */ + public final Builder dataRetention(Function> fn) { + return this.dataRetention(fn.apply(new Time.Builder()).build()); + } + + /** + * If defined, every backing index will execute the configured downsampling + * configuration after the backing index is not the data stream write index + * anymore. + *

+ * API name: {@code downsampling} + */ + public final Builder downsampling(@Nullable DataStreamLifecycleDownsampling value) { + this.downsampling = value; + return this; + } + + /** + * If defined, every backing index will execute the configured downsampling + * configuration after the backing index is not the data stream write index + * anymore. + *
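Because the request no longer wraps a separate DataStreamLifecycle body, retention and downsampling are now set directly on the request builder. A minimal sketch with placeholder values:

    // 'client' is an existing ElasticsearchClient (call site handles IOException).
    client.indices().putDataLifecycle(r -> r
            .name("my-data-stream")              // placeholder data stream name
            .dataRetention(t -> t.time("30d"))); // placeholder retention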

+ * API name: {@code downsampling} + */ + public final Builder downsampling( + Function> fn) { + return this.downsampling(fn.apply(new DataStreamLifecycleDownsampling.Builder()).build()); + } /** * Type of data stream that wildcard patterns can match. Supports @@ -281,30 +372,6 @@ public final Builder timeout(Function> fn) { return this.timeout(fn.apply(new Time.Builder()).build()); } - /** - * Required - Request body. - */ - public final Builder lifecycle(DataStreamLifecycle value) { - this.lifecycle = value; - return this; - } - - /** - * Required - Request body. - */ - public final Builder lifecycle(Function> fn) { - return this.lifecycle(fn.apply(new DataStreamLifecycle.Builder()).build()); - } - - @Override - public Builder withJson(JsonParser parser, JsonpMapper mapper) { - - @SuppressWarnings("unchecked") - DataStreamLifecycle value = (DataStreamLifecycle) DataStreamLifecycle._DESERIALIZER.deserialize(parser, - mapper); - return this.lifecycle(value); - } - @Override protected Builder self() { return this; @@ -323,13 +390,20 @@ public PutDataLifecycleRequest build() { } } - public static final JsonpDeserializer _DESERIALIZER = createPutDataLifecycleRequestDeserializer(); - protected static JsonpDeserializer createPutDataLifecycleRequestDeserializer() { + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link PutDataLifecycleRequest} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, PutDataLifecycleRequest::setupPutDataLifecycleRequestDeserializer); + + protected static void setupPutDataLifecycleRequestDeserializer( + ObjectDeserializer op) { - JsonpDeserializer valueDeserializer = DataStreamLifecycle._DESERIALIZER; + op.add(Builder::dataRetention, Time._DESERIALIZER, "data_retention"); + op.add(Builder::downsampling, DataStreamLifecycleDownsampling._DESERIALIZER, "downsampling"); - return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() - .lifecycle(valueDeserializer.deserialize(parser, mapper, event)).build()); } // --------------------------------------------------------------------------------------------- diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java index 87cd28ed4..826c117d4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java @@ -78,6 +78,9 @@ public class SegmentsRequest extends RequestBase { private final List index; + @Nullable + private final Boolean verbose; + // --------------------------------------------------------------------------------------------- private SegmentsRequest(Builder builder) { @@ -86,6 +89,7 @@ private SegmentsRequest(Builder builder) { this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); this.ignoreUnavailable = builder.ignoreUnavailable; this.index = ApiTypeHelper.unmodifiable(builder.index); + this.verbose = builder.verbose; } @@ -142,6 +146,16 @@ public final List index() { return this.index; } + /** + * If true, the request returns a verbose response. + *

+ * API name: {@code verbose} + */ + @Nullable + public final Boolean verbose() { + return this.verbose; + } + // --------------------------------------------------------------------------------------------- /** @@ -161,6 +175,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private List index; + @Nullable + private Boolean verbose; + /** * If false, the request returns an error if any wildcard * expression, index alias, or _all value targets only missing or @@ -247,6 +264,16 @@ public final Builder index(String value, String... values) { return this; } + /** + * If true, the request returns a verbose response. + *
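A brief sketch of the new flag, with a placeholder index and an assumed client variable:

    import co.elastic.clients.elasticsearch.indices.SegmentsResponse;

    SegmentsResponse segments = client.indices().segments(s -> s
            .index("my-index") // placeholder index
            .verbose(true));   // request the more detailed per-segment output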

+ * API name: {@code verbose} + */ + public final Builder verbose(@Nullable Boolean value) { + this.verbose = value; + return this; + } + @Override protected Builder self() { return this; @@ -335,6 +362,9 @@ public SegmentsRequest build() { if (request.allowNoIndices != null) { params.put("allow_no_indices", String.valueOf(request.allowNoIndices)); } + if (request.verbose != null) { + params.put("verbose", String.valueOf(request.verbose)); + } return params; }, SimpleEndpoint.emptyMap(), false, SegmentsResponse._DESERIALIZER); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle/DataStreamWithLifecycle.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle/DataStreamWithLifecycle.java index 4a1ae5cdf..c0f1c5612 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle/DataStreamWithLifecycle.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/get_data_lifecycle/DataStreamWithLifecycle.java @@ -19,7 +19,7 @@ package co.elastic.clients.elasticsearch.indices.get_data_lifecycle; -import co.elastic.clients.elasticsearch.indices.DataStreamLifecycleWithRollover; +import co.elastic.clients.elasticsearch.indices.DataStreamLifecycle; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -64,7 +64,7 @@ public class DataStreamWithLifecycle implements JsonpSerializable { private final String name; @Nullable - private final DataStreamLifecycleWithRollover lifecycle; + private final DataStreamLifecycle lifecycle; // --------------------------------------------------------------------------------------------- @@ -90,7 +90,7 @@ public final String name() { * API name: {@code lifecycle} */ @Nullable - public final DataStreamLifecycleWithRollover lifecycle() { + public final DataStreamLifecycle lifecycle() { return this.lifecycle; } @@ -133,7 +133,7 @@ public static class Builder extends WithJsonObjectBuilderBase private String name; @Nullable - private DataStreamLifecycleWithRollover lifecycle; + private DataStreamLifecycle lifecycle; /** * Required - API name: {@code name} @@ -146,7 +146,7 @@ public final Builder name(String value) { /** * API name: {@code lifecycle} */ - public final Builder lifecycle(@Nullable DataStreamLifecycleWithRollover value) { + public final Builder lifecycle(@Nullable DataStreamLifecycle value) { this.lifecycle = value; return this; } @@ -154,9 +154,8 @@ public final Builder lifecycle(@Nullable DataStreamLifecycleWithRollover value) /** * API name: {@code lifecycle} */ - public final Builder lifecycle( - Function> fn) { - return this.lifecycle(fn.apply(new DataStreamLifecycleWithRollover.Builder()).build()); + public final Builder lifecycle(Function> fn) { + return this.lifecycle(fn.apply(new DataStreamLifecycle.Builder()).build()); } @Override @@ -189,7 +188,7 @@ protected static void setupDataStreamWithLifecycleDeserializer( ObjectDeserializer op) { op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); - op.add(Builder::lifecycle, DataStreamLifecycleWithRollover._DESERIALIZER, "lifecycle"); + op.add(Builder::lifecycle, DataStreamLifecycle._DESERIALIZER, "lifecycle"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java index 
12346a688..64685ed53 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java @@ -182,25 +182,7 @@ public final CompletableFuture inference( // ----- Endpoint: inference.put /** - * Create an inference endpoint. When you create an inference endpoint, the - * associated machine learning model is automatically deployed if it is not - * already running. After creating the endpoint, wait for the model deployment - * to complete before using it. To verify the deployment status, use the get - * trained model statistics API. Look for - * "state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. - *

- * IMPORTANT: The inference APIs enable you to use certain services, such as - * built-in machine learning models (ELSER, E5), models uploaded through Eland, - * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, - * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - * uploaded through Eland, the inference APIs offer an alternative way to use - * and manage trained models. However, if you do not plan to use the inference - * APIs to use these models or if you want to use non-NLP models, use the - * machine learning trained model APIs. + * Create an inference endpoint * * @see Documentation @@ -215,25 +197,7 @@ public CompletableFuture put(PutRequest request) { } /** - * Create an inference endpoint. When you create an inference endpoint, the - * associated machine learning model is automatically deployed if it is not - * already running. After creating the endpoint, wait for the model deployment - * to complete before using it. To verify the deployment status, use the get - * trained model statistics API. Look for - * "state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. - *

- * IMPORTANT: The inference APIs enable you to use certain services, such as - * built-in machine learning models (ELSER, E5), models uploaded through Eland, - * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, - * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - * uploaded through Eland, the inference APIs offer an alternative way to use - * and manage trained models. However, if you do not plan to use the inference - * APIs to use these models or if you want to use non-NLP models, use the - * machine learning trained model APIs. + * Create an inference endpoint * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java index 9910b5717..753d3646c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java @@ -181,25 +181,7 @@ public final InferenceResponse inference(Function"state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. - *

- * IMPORTANT: The inference APIs enable you to use certain services, such as - * built-in machine learning models (ELSER, E5), models uploaded through Eland, - * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, - * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - * uploaded through Eland, the inference APIs offer an alternative way to use - * and manage trained models. However, if you do not plan to use the inference - * APIs to use these models or if you want to use non-NLP models, use the - * machine learning trained model APIs. + * Create an inference endpoint * * @see Documentation @@ -214,25 +196,7 @@ public PutResponse put(PutRequest request) throws IOException, ElasticsearchExce } /** - * Create an inference endpoint. When you create an inference endpoint, the - * associated machine learning model is automatically deployed if it is not - * already running. After creating the endpoint, wait for the model deployment - * to complete before using it. To verify the deployment status, use the get - * trained model statistics API. Look for - * "state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. - *

- * IMPORTANT: The inference APIs enable you to use certain services, such as - * built-in machine learning models (ELSER, E5), models uploaded through Eland, - * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, - * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - * uploaded through Eland, the inference APIs offer an alternative way to use - * and manage trained models. However, if you do not plan to use the inference - * APIs to use these models or if you want to use non-NLP models, use the - * machine learning trained model APIs. + * Create an inference endpoint * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java index 6f734f226..3de1fbd28 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java @@ -59,25 +59,7 @@ // typedef: inference.put.Request /** - * Create an inference endpoint. When you create an inference endpoint, the - * associated machine learning model is automatically deployed if it is not - * already running. After creating the endpoint, wait for the model deployment - * to complete before using it. To verify the deployment status, use the get - * trained model statistics API. Look for - * "state": "fully_allocated" in the response - * and ensure that the "allocation_count" matches the - * "target_allocation_count". Avoid creating multiple - * endpoints for the same model unless required, as each endpoint consumes - * significant resources. - *
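Creating an endpoint still follows the client's value-body pattern; the sketch below is an assumption about typical usage rather than something defined in this change, and the builder method names (inferenceConfig, service, serviceSettings) as well as the service name and settings keys are assumptions or placeholders. The deployment-status check described in the removed text can still be performed separately through the trained model statistics API.

    import co.elastic.clients.json.JsonData;
    import java.util.Map;

    // 'client' is an existing ElasticsearchClient (call site handles IOException).
    client.inference().put(p -> p
            .inferenceId("my-elser-endpoint") // placeholder endpoint id
            .inferenceConfig(c -> c
                    .service("elser")         // placeholder service
                    .serviceSettings(JsonData.of(Map.of("num_allocations", 1, "num_threads", 1)))));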

- * IMPORTANT: The inference APIs enable you to use certain services, such as - * built-in machine learning models (ELSER, E5), models uploaded through Eland, - * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, - * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - * uploaded through Eland, the inference APIs offer an alternative way to use - * and manage trained models. However, if you do not plan to use the inference - * APIs to use these models or if you want to use non-NLP models, use the - * machine learning trained model APIs. + * Create an inference endpoint * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java index 63a428ed8..0314ef484 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfiguration.java @@ -19,6 +19,7 @@ package co.elastic.clients.elasticsearch.ingest; +import co.elastic.clients.json.JsonEnum; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -28,8 +29,11 @@ import co.elastic.clients.json.ObjectDeserializer; import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.TaggedUnion; +import co.elastic.clients.util.TaggedUnionUtils; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; +import java.lang.Object; import java.lang.String; import java.util.Objects; import java.util.function.Function; @@ -53,23 +57,69 @@ // typedef: ingest._types.DatabaseConfiguration /** - * + * The configuration necessary to identify which IP geolocation provider to use + * to download a database, as well as any provider-specific configuration + * necessary for such downloading. At present, the only supported providers are + * maxmind and ipinfo, and the maxmind provider requires that an account_id + * (string) is configured. A provider (either maxmind or ipinfo) must be + * specified. The web and local providers can be returned as read only + * configurations. + * * @see API * specification */ @JsonpDeserializable -public class DatabaseConfiguration implements JsonpSerializable { - private final String name; +public class DatabaseConfiguration implements TaggedUnion, JsonpSerializable { + + /** + * {@link DatabaseConfiguration} variant kinds. 
+ * + * @see API + * specification + */ - private final Maxmind maxmind; + public enum Kind implements JsonEnum { + Maxmind("maxmind"), - // --------------------------------------------------------------------------------------------- + Ipinfo("ipinfo"), + + ; + + private final String jsonValue; + + Kind(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + } + + private final Kind _kind; + private final Object _value; + + @Override + public final Kind _kind() { + return _kind; + } + + @Override + public final Object _get() { + return _value; + } + + private final String name; private DatabaseConfiguration(Builder builder) { + this._kind = ApiTypeHelper.requireNonNull(builder._kind, builder, ""); + this._value = ApiTypeHelper.requireNonNull(builder._value, builder, ""); + this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); - this.maxmind = ApiTypeHelper.requireNonNull(builder.maxmind, this, "maxmind"); } @@ -88,34 +138,54 @@ public final String name() { } /** - * Required - The configuration necessary to identify which IP geolocation - * provider to use to download the database, as well as any provider-specific - * configuration necessary for such downloading. At present, the only supported - * provider is maxmind, and the maxmind provider requires that an account_id - * (string) is configured. - *

- * API name: {@code maxmind} + * Is this variant instance of kind {@code maxmind}? */ - public final Maxmind maxmind() { - return this.maxmind; + public boolean isMaxmind() { + return _kind == Kind.Maxmind; } /** - * Serialize this object to JSON. + * Get the {@code maxmind} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code maxmind} kind. */ - public void serialize(JsonGenerator generator, JsonpMapper mapper) { - generator.writeStartObject(); - serializeInternal(generator, mapper); - generator.writeEnd(); + public Maxmind maxmind() { + return TaggedUnionUtils.get(this, Kind.Maxmind); + } + + /** + * Is this variant instance of kind {@code ipinfo}? + */ + public boolean isIpinfo() { + return _kind == Kind.Ipinfo; } - protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + /** + * Get the {@code ipinfo} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code ipinfo} kind. + */ + public Ipinfo ipinfo() { + return TaggedUnionUtils.get(this, Kind.Ipinfo); + } + + @Override + @SuppressWarnings("unchecked") + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeStartObject(); generator.writeKey("name"); generator.write(this.name); - generator.writeKey("maxmind"); - this.maxmind.serialize(generator, mapper); + generator.writeKey(_kind.jsonValue()); + if (_value instanceof JsonpSerializable) { + ((JsonpSerializable) _value).serialize(generator, mapper); + } + + generator.writeEnd(); } @@ -124,18 +194,13 @@ public String toString() { return JsonpUtils.toString(this); } - // --------------------------------------------------------------------------------------------- - - /** - * Builder for {@link DatabaseConfiguration}. - */ - public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - private String name; + private Kind _kind; + private Object _value; - private Maxmind maxmind; + private String name; /** * Required - The provider-assigned name of the IP geolocation database to @@ -148,64 +213,62 @@ public final Builder name(String value) { return this; } - /** - * Required - The configuration necessary to identify which IP geolocation - * provider to use to download the database, as well as any provider-specific - * configuration necessary for such downloading. At present, the only supported - * provider is maxmind, and the maxmind provider requires that an account_id - * (string) is configured. - *

- * API name: {@code maxmind} - */ - public final Builder maxmind(Maxmind value) { - this.maxmind = value; + @Override + protected Builder self() { return this; } + public ContainerBuilder maxmind(Maxmind v) { + this._kind = Kind.Maxmind; + this._value = v; + return new ContainerBuilder(); + } - /** - * Required - The configuration necessary to identify which IP geolocation - * provider to use to download the database, as well as any provider-specific - * configuration necessary for such downloading. At present, the only supported - * provider is maxmind, and the maxmind provider requires that an account_id - * (string) is configured. - *

- * API name: {@code maxmind} - */ - public final Builder maxmind(Function> fn) { + public ContainerBuilder maxmind(Function> fn) { return this.maxmind(fn.apply(new Maxmind.Builder()).build()); } - @Override - protected Builder self() { - return this; + public ContainerBuilder ipinfo(Ipinfo v) { + this._kind = Kind.Ipinfo; + this._value = v; + return new ContainerBuilder(); + } + + public ContainerBuilder ipinfo(Function> fn) { + return this.ipinfo(fn.apply(new Ipinfo.Builder()).build()); } - /** - * Builds a {@link DatabaseConfiguration}. - * - * @throws NullPointerException - * if some of the required fields are null. - */ public DatabaseConfiguration build() { _checkSingleUse(); - return new DatabaseConfiguration(this); } - } - - // --------------------------------------------------------------------------------------------- - /** - * Json deserializer for {@link DatabaseConfiguration} - */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer - .lazy(Builder::new, DatabaseConfiguration::setupDatabaseConfigurationDeserializer); + public class ContainerBuilder implements ObjectBuilder { + + /** + * Required - The provider-assigned name of the IP geolocation database to + * download. + *
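With the move to a tagged union, a configuration picks exactly one provider variant, and DatabaseConfigurationBuilders offers shortcut factories for the same thing. A rough sketch with placeholder values, assuming the ipinfo variant takes no settings:

    import co.elastic.clients.elasticsearch.ingest.DatabaseConfiguration;
    import co.elastic.clients.elasticsearch.ingest.DatabaseConfigurationBuilders;

    DatabaseConfiguration maxmindDb = DatabaseConfiguration.of(d -> d
            .name("GeoIP2-City")                   // placeholder database name
            .maxmind(m -> m.accountId("123456"))); // placeholder MaxMind account id

    DatabaseConfiguration ipinfoDb = DatabaseConfigurationBuilders.ipinfo(i -> i);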

+ * API name: {@code name} + */ + public final ContainerBuilder name(String value) { + Builder.this.name = value; + return this; + } + + public DatabaseConfiguration build() { + return Builder.this.build(); + } + } + } - protected static void setupDatabaseConfigurationDeserializer(ObjectDeserializer op) { + protected static void setupDatabaseConfigurationDeserializer(ObjectDeserializer op) { op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); op.add(Builder::maxmind, Maxmind._DESERIALIZER, "maxmind"); + op.add(Builder::ipinfo, Ipinfo._DESERIALIZER, "ipinfo"); } + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DatabaseConfiguration::setupDatabaseConfigurationDeserializer, Builder::build); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationBuilders.java new file mode 100644 index 000000000..17cbd004c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationBuilders.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.util.ObjectBuilder; +import java.util.function.Function; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * Builders for {@link DatabaseConfiguration} variants. + */ +public class DatabaseConfigurationBuilders { + private DatabaseConfigurationBuilders() { + } + + /** + * Creates a builder for the {@link Maxmind maxmind} + * {@code DatabaseConfiguration} variant. + */ + public static Maxmind.Builder maxmind() { + return new Maxmind.Builder(); + } + + /** + * Creates a DatabaseConfiguration of the {@link Maxmind maxmind} + * {@code DatabaseConfiguration} variant. + */ + public static DatabaseConfiguration maxmind(Function> fn) { + DatabaseConfiguration.Builder builder = new DatabaseConfiguration.Builder(); + builder.maxmind(fn.apply(new Maxmind.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link Ipinfo ipinfo} {@code DatabaseConfiguration} + * variant. 
+ */ + public static Ipinfo.Builder ipinfo() { + return new Ipinfo.Builder(); + } + + /** + * Creates a DatabaseConfiguration of the {@link Ipinfo ipinfo} + * {@code DatabaseConfiguration} variant. + */ + public static DatabaseConfiguration ipinfo(Function> fn) { + DatabaseConfiguration.Builder builder = new DatabaseConfiguration.Builder(); + builder.ipinfo(fn.apply(new Ipinfo.Builder()).build()); + return builder.build(); + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFull.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFull.java new file mode 100644 index 000000000..4c7c399a9 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFull.java @@ -0,0 +1,330 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.json.JsonEnum; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.TaggedUnion; +import co.elastic.clients.util.TaggedUnionUtils; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Object; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: ingest._types.DatabaseConfigurationFull + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DatabaseConfigurationFull + implements + TaggedUnion, + JsonpSerializable { + + /** + * {@link DatabaseConfigurationFull} variant kinds. 
+ * + * @see API + * specification + */ + + public enum Kind implements JsonEnum { + Web("web"), + + Local("local"), + + Maxmind("maxmind"), + + Ipinfo("ipinfo"), + + ; + + private final String jsonValue; + + Kind(String jsonValue) { + this.jsonValue = jsonValue; + } + + public String jsonValue() { + return this.jsonValue; + } + + } + + private final Kind _kind; + private final Object _value; + + @Override + public final Kind _kind() { + return _kind; + } + + @Override + public final Object _get() { + return _value; + } + + private final String name; + + private DatabaseConfigurationFull(Builder builder) { + + this._kind = ApiTypeHelper.requireNonNull(builder._kind, builder, ""); + this._value = ApiTypeHelper.requireNonNull(builder._value, builder, ""); + + this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); + + } + + public static DatabaseConfigurationFull of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - The provider-assigned name of the IP geolocation database to + * download. + *

+ * API name: {@code name} + */ + public final String name() { + return this.name; + } + + /** + * Is this variant instance of kind {@code web}? + */ + public boolean isWeb() { + return _kind == Kind.Web; + } + + /** + * Get the {@code web} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code web} kind. + */ + public Web web() { + return TaggedUnionUtils.get(this, Kind.Web); + } + + /** + * Is this variant instance of kind {@code local}? + */ + public boolean isLocal() { + return _kind == Kind.Local; + } + + /** + * Get the {@code local} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code local} kind. + */ + public Local local() { + return TaggedUnionUtils.get(this, Kind.Local); + } + + /** + * Is this variant instance of kind {@code maxmind}? + */ + public boolean isMaxmind() { + return _kind == Kind.Maxmind; + } + + /** + * Get the {@code maxmind} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code maxmind} kind. + */ + public Maxmind maxmind() { + return TaggedUnionUtils.get(this, Kind.Maxmind); + } + + /** + * Is this variant instance of kind {@code ipinfo}? + */ + public boolean isIpinfo() { + return _kind == Kind.Ipinfo; + } + + /** + * Get the {@code ipinfo} variant value. + * + * @throws IllegalStateException + * if the current variant is not of the {@code ipinfo} kind. + */ + public Ipinfo ipinfo() { + return TaggedUnionUtils.get(this, Kind.Ipinfo); + } + + @Override + @SuppressWarnings("unchecked") + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeStartObject(); + + generator.writeKey("name"); + generator.write(this.name); + + generator.writeKey(_kind.jsonValue()); + if (_value instanceof JsonpSerializable) { + ((JsonpSerializable) _value).serialize(generator, mapper); + } + + generator.writeEnd(); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private Kind _kind; + private Object _value; + + private String name; + + /** + * Required - The provider-assigned name of the IP geolocation database to + * download. + *

+ * API name: {@code name} + */ + public final Builder name(String value) { + this.name = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + public ContainerBuilder web(Web v) { + this._kind = Kind.Web; + this._value = v; + return new ContainerBuilder(); + } + + public ContainerBuilder web(Function> fn) { + return this.web(fn.apply(new Web.Builder()).build()); + } + + public ContainerBuilder local(Local v) { + this._kind = Kind.Local; + this._value = v; + return new ContainerBuilder(); + } + + public ContainerBuilder local(Function> fn) { + return this.local(fn.apply(new Local.Builder()).build()); + } + + public ContainerBuilder maxmind(Maxmind v) { + this._kind = Kind.Maxmind; + this._value = v; + return new ContainerBuilder(); + } + + public ContainerBuilder maxmind(Function> fn) { + return this.maxmind(fn.apply(new Maxmind.Builder()).build()); + } + + public ContainerBuilder ipinfo(Ipinfo v) { + this._kind = Kind.Ipinfo; + this._value = v; + return new ContainerBuilder(); + } + + public ContainerBuilder ipinfo(Function> fn) { + return this.ipinfo(fn.apply(new Ipinfo.Builder()).build()); + } + + public DatabaseConfigurationFull build() { + _checkSingleUse(); + return new DatabaseConfigurationFull(this); + } + + public class ContainerBuilder implements ObjectBuilder { + + /** + * Required - The provider-assigned name of the IP geolocation database to + * download. + *

+ * API name: {@code name} + */ + public final ContainerBuilder name(String value) { + Builder.this.name = value; + return this; + } + + public DatabaseConfigurationFull build() { + return Builder.this.build(); + } + } + } + + protected static void setupDatabaseConfigurationFullDeserializer(ObjectDeserializer op) { + + op.add(Builder::web, Web._DESERIALIZER, "web"); + op.add(Builder::local, Local._DESERIALIZER, "local"); + op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); + op.add(Builder::maxmind, Maxmind._DESERIALIZER, "maxmind"); + op.add(Builder::ipinfo, Ipinfo._DESERIALIZER, "ipinfo"); + + } + + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DatabaseConfigurationFull::setupDatabaseConfigurationFullDeserializer, Builder::build); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFullBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFullBuilders.java new file mode 100644 index 000000000..f1b483a30 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFullBuilders.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.util.ObjectBuilder; +import java.util.function.Function; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * Builders for {@link DatabaseConfigurationFull} variants. + */ +public class DatabaseConfigurationFullBuilders { + private DatabaseConfigurationFullBuilders() { + } + + /** + * Creates a builder for the {@link Web web} {@code DatabaseConfigurationFull} + * variant. + */ + public static Web.Builder web() { + return new Web.Builder(); + } + + /** + * Creates a DatabaseConfigurationFull of the {@link Web web} + * {@code DatabaseConfigurationFull} variant. 
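// Illustrative sketch, not part of the generated patch: consuming the DatabaseConfigurationFull
// tagged union defined above by switching on its variant accessors.
static String describe(DatabaseConfigurationFull full) {
    if (full.isMaxmind()) {
        return full.name() + " (maxmind account " + full.maxmind().accountId() + ")";
    }
    if (full.isIpinfo()) {
        return full.name() + " (ipinfo)"; // the ipinfo variant carries no extra fields
    }
    return full.name() + " (" + full._kind().jsonValue() + ")";
}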
+ */ + public static DatabaseConfigurationFull web(Function> fn) { + DatabaseConfigurationFull.Builder builder = new DatabaseConfigurationFull.Builder(); + builder.web(fn.apply(new Web.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link Local local} + * {@code DatabaseConfigurationFull} variant. + */ + public static Local.Builder local() { + return new Local.Builder(); + } + + /** + * Creates a DatabaseConfigurationFull of the {@link Local local} + * {@code DatabaseConfigurationFull} variant. + */ + public static DatabaseConfigurationFull local(Function> fn) { + DatabaseConfigurationFull.Builder builder = new DatabaseConfigurationFull.Builder(); + builder.local(fn.apply(new Local.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link Maxmind maxmind} + * {@code DatabaseConfigurationFull} variant. + */ + public static Maxmind.Builder maxmind() { + return new Maxmind.Builder(); + } + + /** + * Creates a DatabaseConfigurationFull of the {@link Maxmind maxmind} + * {@code DatabaseConfigurationFull} variant. + */ + public static DatabaseConfigurationFull maxmind(Function> fn) { + DatabaseConfigurationFull.Builder builder = new DatabaseConfigurationFull.Builder(); + builder.maxmind(fn.apply(new Maxmind.Builder()).build()); + return builder.build(); + } + + /** + * Creates a builder for the {@link Ipinfo ipinfo} + * {@code DatabaseConfigurationFull} variant. + */ + public static Ipinfo.Builder ipinfo() { + return new Ipinfo.Builder(); + } + + /** + * Creates a DatabaseConfigurationFull of the {@link Ipinfo ipinfo} + * {@code DatabaseConfigurationFull} variant. + */ + public static DatabaseConfigurationFull ipinfo(Function> fn) { + DatabaseConfigurationFull.Builder builder = new DatabaseConfigurationFull.Builder(); + builder.ipinfo(fn.apply(new Ipinfo.Builder()).build()); + return builder.build(); + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFullVariant.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFullVariant.java new file mode 100644 index 000000000..01146affb --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationFullVariant.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * Base interface for {@link DatabaseConfigurationFull} variants. + */ +public interface DatabaseConfigurationFullVariant { + + DatabaseConfigurationFull.Kind _databaseConfigurationFullKind(); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationVariant.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationVariant.java new file mode 100644 index 000000000..88232446f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DatabaseConfigurationVariant.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * Base interface for {@link DatabaseConfiguration} variants. + */ +public interface DatabaseConfigurationVariant { + + DatabaseConfiguration.Kind _databaseConfigurationKind(); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java new file mode 100644 index 000000000..9fbffa205 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseRequest.java @@ -0,0 +1,289 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. 
licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: ingest.delete_ip_location_database.Request + +/** + * Deletes an IP location database configuration. + * + * @see API + * specification + */ + +public class DeleteIpLocationDatabaseRequest extends RequestBase { + private final List id; + + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + // --------------------------------------------------------------------------------------------- + + private DeleteIpLocationDatabaseRequest(Builder builder) { + + this.id = ApiTypeHelper.unmodifiableRequired(builder.id, this, "id"); + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + + } + + public static DeleteIpLocationDatabaseRequest of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - A comma-separated list of IP location database configurations to + * delete + *

+     * API name: {@code id}
+     */
+    public final List<String> id() {
+        return this.id;
+    }
+
+    /**
+     * Period to wait for a connection to the master node. If no response is
+     * received before the timeout expires, the request fails and returns an error.
+     * <p>

+     * API name: {@code master_timeout}
+     */
+    @Nullable
+    public final Time masterTimeout() {
+        return this.masterTimeout;
+    }
+
+    /**
+     * Period to wait for a response. If no response is received before the timeout
+     * expires, the request fails and returns an error.
+     * <p>

+     * API name: {@code timeout}
+     */
+    @Nullable
+    public final Time timeout() {
+        return this.timeout;
+    }
+
+    // ---------------------------------------------------------------------------------------------
+
+    /**
+     * Builder for {@link DeleteIpLocationDatabaseRequest}.
+     */
+
+    public static class Builder extends RequestBase.AbstractBuilder<Builder>
+            implements
+                ObjectBuilder<DeleteIpLocationDatabaseRequest> {
+        private List<String> id;
+
+        @Nullable
+        private Time masterTimeout;
+
+        @Nullable
+        private Time timeout;
+
+        /**
+         * Required - A comma-separated list of IP location database configurations to
+         * delete
+         * <p>

+         * API name: {@code id}
+         * <p>

+         * Adds all elements of <code>list</code> to <code>id</code>.
+         */
+        public final Builder id(List<String> list) {
+            this.id = _listAddAll(this.id, list);
+            return this;
+        }
+
+        /**
+         * Required - A comma-separated list of IP location database configurations to
+         * delete
+         * <p>

+         * API name: {@code id}
+         * <p>

+         * Adds one or more values to <code>id</code>.
+         */
+        public final Builder id(String value, String... values) {
+            this.id = _listAdd(this.id, value, values);
+            return this;
+        }
+
+        /**
+         * Period to wait for a connection to the master node. If no response is
+         * received before the timeout expires, the request fails and returns an error.
+         * <p>

+         * API name: {@code master_timeout}
+         */
+        public final Builder masterTimeout(@Nullable Time value) {
+            this.masterTimeout = value;
+            return this;
+        }
+
+        /**
+         * Period to wait for a connection to the master node. If no response is
+         * received before the timeout expires, the request fails and returns an error.
+         * <p>

+         * API name: {@code master_timeout}
+         */
+        public final Builder masterTimeout(Function<Time.Builder, ObjectBuilder<Time>> fn) {
+            return this.masterTimeout(fn.apply(new Time.Builder()).build());
+        }
+
+        /**
+         * Period to wait for a response. If no response is received before the timeout
+         * expires, the request fails and returns an error.
+         * <p>

+         * API name: {@code timeout}
+         */
+        public final Builder timeout(@Nullable Time value) {
+            this.timeout = value;
+            return this;
+        }
+
+        /**
+         * Period to wait for a response. If no response is received before the timeout
+         * expires, the request fails and returns an error.
+         * <p>

+ * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DeleteIpLocationDatabaseRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DeleteIpLocationDatabaseRequest build() { + _checkSingleUse(); + + return new DeleteIpLocationDatabaseRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code ingest.delete_ip_location_database}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/ingest.delete_ip_location_database", + + // Request method + request -> { + return "DELETE"; + + }, + + // Request path + request -> { + final int _id = 1 << 0; + + int propsSet = 0; + + propsSet |= _id; + + if (propsSet == (_id)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_ingest"); + buf.append("/ip_location"); + buf.append("/database"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.id.stream().map(v -> v).collect(Collectors.joining(",")), buf); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _id = 1 << 0; + + int propsSet = 0; + + propsSet |= _id; + + if (propsSet == (_id)) { + params.put("id", request.id.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; + + }, SimpleEndpoint.emptyMap(), false, DeleteIpLocationDatabaseResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/health_report/FileSettingsIndicator.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseResponse.java similarity index 51% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/core/health_report/FileSettingsIndicator.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseResponse.java index 2fbfee90c..8c2b16082 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/health_report/FileSettingsIndicator.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/DeleteIpLocationDatabaseResponse.java @@ -17,18 +17,17 @@ * under the License. */ -package co.elastic.clients.elasticsearch.core.health_report; +package co.elastic.clients.elasticsearch.ingest; +import co.elastic.clients.elasticsearch._types.AcknowledgedResponseBase; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; -import co.elastic.clients.json.JsonpMapper; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; import co.elastic.clients.util.ObjectBuilder; import jakarta.json.stream.JsonGenerator; import java.util.Objects; import java.util.function.Function; -import javax.annotation.Nullable; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
@@ -45,109 +44,66 @@ // //---------------------------------------------------------------- -// typedef: _global.health_report.FileSettingsIndicator +// typedef: ingest.delete_ip_location_database.Response /** - * FILE_SETTINGS - * + * * @see API + * "../doc-files/api-spec.html#ingest.delete_ip_location_database.Response">API * specification */ @JsonpDeserializable -public class FileSettingsIndicator extends BaseIndicator { - @Nullable - private final FileSettingsIndicatorDetails details; - +public class DeleteIpLocationDatabaseResponse extends AcknowledgedResponseBase { // --------------------------------------------------------------------------------------------- - private FileSettingsIndicator(Builder builder) { + private DeleteIpLocationDatabaseResponse(Builder builder) { super(builder); - this.details = builder.details; - } - public static FileSettingsIndicator of(Function> fn) { + public static DeleteIpLocationDatabaseResponse of( + Function> fn) { return fn.apply(new Builder()).build(); } - /** - * API name: {@code details} - */ - @Nullable - public final FileSettingsIndicatorDetails details() { - return this.details; - } - - protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - - super.serializeInternal(generator, mapper); - if (this.details != null) { - generator.writeKey("details"); - this.details.serialize(generator, mapper); - - } - - } - // --------------------------------------------------------------------------------------------- /** - * Builder for {@link FileSettingsIndicator}. + * Builder for {@link DeleteIpLocationDatabaseResponse}. */ - public static class Builder extends BaseIndicator.AbstractBuilder + public static class Builder extends AcknowledgedResponseBase.AbstractBuilder implements - ObjectBuilder { - @Nullable - private FileSettingsIndicatorDetails details; - - /** - * API name: {@code details} - */ - public final Builder details(@Nullable FileSettingsIndicatorDetails value) { - this.details = value; - return this; - } - - /** - * API name: {@code details} - */ - public final Builder details( - Function> fn) { - return this.details(fn.apply(new FileSettingsIndicatorDetails.Builder()).build()); - } - + ObjectBuilder { @Override protected Builder self() { return this; } /** - * Builds a {@link FileSettingsIndicator}. + * Builds a {@link DeleteIpLocationDatabaseResponse}. * * @throws NullPointerException * if some of the required fields are null. 
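// Illustrative sketch, not part of the generated patch: the new response type only carries the
// usual acknowledgement flag inherited from AcknowledgedResponseBase.
static boolean wasDeleted(DeleteIpLocationDatabaseResponse response) {
    return response.acknowledged();
}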
*/ - public FileSettingsIndicator build() { + public DeleteIpLocationDatabaseResponse build() { _checkSingleUse(); - return new FileSettingsIndicator(this); + return new DeleteIpLocationDatabaseResponse(this); } } // --------------------------------------------------------------------------------------------- /** - * Json deserializer for {@link FileSettingsIndicator} + * Json deserializer for {@link DeleteIpLocationDatabaseResponse} */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer - .lazy(Builder::new, FileSettingsIndicator::setupFileSettingsIndicatorDeserializer); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DeleteIpLocationDatabaseResponse::setupDeleteIpLocationDatabaseResponseDeserializer); - protected static void setupFileSettingsIndicatorDeserializer(ObjectDeserializer op) { - BaseIndicator.setupBaseIndicatorDeserializer(op); - op.add(Builder::details, FileSettingsIndicatorDetails._DESERIALIZER, "details"); + protected static void setupDeleteIpLocationDatabaseResponseDeserializer( + ObjectDeserializer op) { + AcknowledgedResponseBase.setupAcknowledgedResponseBaseDeserializer(op); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java index c9f5c9469..bb678fb65 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestAsyncClient.java @@ -100,6 +100,40 @@ public final CompletableFuture deleteGeoipDatabase( return deleteGeoipDatabase(fn.apply(new DeleteGeoipDatabaseRequest.Builder()).build()); } + // ----- Endpoint: ingest.delete_ip_location_database + + /** + * Deletes an IP location database configuration. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture deleteIpLocationDatabase( + DeleteIpLocationDatabaseRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) DeleteIpLocationDatabaseRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Deletes an IP location database configuration. + * + * @param fn + * a function that initializes a builder to create the + * {@link DeleteIpLocationDatabaseRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture deleteIpLocationDatabase( + Function> fn) { + return deleteIpLocationDatabase(fn.apply(new DeleteIpLocationDatabaseRequest.Builder()).build()); + } + // ----- Endpoint: ingest.delete_pipeline /** @@ -197,6 +231,53 @@ public CompletableFuture getGeoipDatabase() { GetGeoipDatabaseRequest._ENDPOINT, this.transportOptions); } + // ----- Endpoint: ingest.get_ip_location_database + + /** + * Returns information about one or more IP location database configurations. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture getIpLocationDatabase( + GetIpLocationDatabaseRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetIpLocationDatabaseRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Returns information about one or more IP location database configurations. 
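// Illustrative sketch, not part of the generated patch: calling the new async endpoint. It
// assumes an already-configured ElasticsearchAsyncClient named "asyncClient"; "my-ipinfo-db"
// is a placeholder configuration id.
asyncClient.ingest()
    .getIpLocationDatabase(g -> g.id("my-ipinfo-db"))
    .thenAccept(response -> response.databases()
        .forEach(db -> System.out.println(db)));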
+ * + * @param fn + * a function that initializes a builder to create the + * {@link GetIpLocationDatabaseRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture getIpLocationDatabase( + Function> fn) { + return getIpLocationDatabase(fn.apply(new GetIpLocationDatabaseRequest.Builder()).build()); + } + + /** + * Returns information about one or more IP location database configurations. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture getIpLocationDatabase() { + return this.transport.performRequestAsync(new GetIpLocationDatabaseRequest.Builder().build(), + GetIpLocationDatabaseRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: ingest.get_pipeline /** @@ -299,6 +380,40 @@ public final CompletableFuture putGeoipDatabase( return putGeoipDatabase(fn.apply(new PutGeoipDatabaseRequest.Builder()).build()); } + // ----- Endpoint: ingest.put_ip_location_database + + /** + * Returns information about one or more IP location database configurations. + * + * @see Documentation + * on elastic.co + */ + + public CompletableFuture putIpLocationDatabase( + PutIpLocationDatabaseRequest request) { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PutIpLocationDatabaseRequest._ENDPOINT; + + return this.transport.performRequestAsync(request, endpoint, this.transportOptions); + } + + /** + * Returns information about one or more IP location database configurations. + * + * @param fn + * a function that initializes a builder to create the + * {@link PutIpLocationDatabaseRequest} + * @see Documentation + * on elastic.co + */ + + public final CompletableFuture putIpLocationDatabase( + Function> fn) { + return putIpLocationDatabase(fn.apply(new PutIpLocationDatabaseRequest.Builder()).build()); + } + // ----- Endpoint: ingest.put_pipeline /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java index 3bb097c4f..c4d198a4e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/ElasticsearchIngestClient.java @@ -102,6 +102,41 @@ public final DeleteGeoipDatabaseResponse deleteGeoipDatabase( return deleteGeoipDatabase(fn.apply(new DeleteGeoipDatabaseRequest.Builder()).build()); } + // ----- Endpoint: ingest.delete_ip_location_database + + /** + * Deletes an IP location database configuration. + * + * @see Documentation + * on elastic.co + */ + + public DeleteIpLocationDatabaseResponse deleteIpLocationDatabase(DeleteIpLocationDatabaseRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) DeleteIpLocationDatabaseRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Deletes an IP location database configuration. 
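// Illustrative sketch, not part of the generated patch: the blocking variant of the new delete
// endpoint. "client" is an already-configured ElasticsearchClient and the id is a placeholder;
// the call declares IOException, so it must run where that exception can propagate.
DeleteIpLocationDatabaseResponse deleted = client.ingest()
    .deleteIpLocationDatabase(d -> d.id("my-ipinfo-db"));
boolean acknowledged = deleted.acknowledged();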
+ * + * @param fn + * a function that initializes a builder to create the + * {@link DeleteIpLocationDatabaseRequest} + * @see Documentation + * on elastic.co + */ + + public final DeleteIpLocationDatabaseResponse deleteIpLocationDatabase( + Function> fn) + throws IOException, ElasticsearchException { + return deleteIpLocationDatabase(fn.apply(new DeleteIpLocationDatabaseRequest.Builder()).build()); + } + // ----- Endpoint: ingest.delete_pipeline /** @@ -203,6 +238,54 @@ public GetGeoipDatabaseResponse getGeoipDatabase() throws IOException, Elasticse GetGeoipDatabaseRequest._ENDPOINT, this.transportOptions); } + // ----- Endpoint: ingest.get_ip_location_database + + /** + * Returns information about one or more IP location database configurations. + * + * @see Documentation + * on elastic.co + */ + + public GetIpLocationDatabaseResponse getIpLocationDatabase(GetIpLocationDatabaseRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) GetIpLocationDatabaseRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Returns information about one or more IP location database configurations. + * + * @param fn + * a function that initializes a builder to create the + * {@link GetIpLocationDatabaseRequest} + * @see Documentation + * on elastic.co + */ + + public final GetIpLocationDatabaseResponse getIpLocationDatabase( + Function> fn) + throws IOException, ElasticsearchException { + return getIpLocationDatabase(fn.apply(new GetIpLocationDatabaseRequest.Builder()).build()); + } + + /** + * Returns information about one or more IP location database configurations. + * + * @see Documentation + * on elastic.co + */ + + public GetIpLocationDatabaseResponse getIpLocationDatabase() throws IOException, ElasticsearchException { + return this.transport.performRequest(new GetIpLocationDatabaseRequest.Builder().build(), + GetIpLocationDatabaseRequest._ENDPOINT, this.transportOptions); + } + // ----- Endpoint: ingest.get_pipeline /** @@ -308,6 +391,41 @@ public final PutGeoipDatabaseResponse putGeoipDatabase( return putGeoipDatabase(fn.apply(new PutGeoipDatabaseRequest.Builder()).build()); } + // ----- Endpoint: ingest.put_ip_location_database + + /** + * Returns information about one or more IP location database configurations. + * + * @see Documentation + * on elastic.co + */ + + public PutIpLocationDatabaseResponse putIpLocationDatabase(PutIpLocationDatabaseRequest request) + throws IOException, ElasticsearchException { + @SuppressWarnings("unchecked") + JsonEndpoint endpoint = (JsonEndpoint) PutIpLocationDatabaseRequest._ENDPOINT; + + return this.transport.performRequest(request, endpoint, this.transportOptions); + } + + /** + * Returns information about one or more IP location database configurations. 
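// Illustrative sketch, not part of the generated patch: the two blocking read paths added here,
// one listing every configuration and one filtering by a wildcard id ("client" as above).
GetIpLocationDatabaseResponse all = client.ingest().getIpLocationDatabase();
GetIpLocationDatabaseResponse mine = client.ingest()
    .getIpLocationDatabase(g -> g.id("my-*"));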
+ * + * @param fn + * a function that initializes a builder to create the + * {@link PutIpLocationDatabaseRequest} + * @see Documentation + * on elastic.co + */ + + public final PutIpLocationDatabaseResponse putIpLocationDatabase( + Function> fn) + throws IOException, ElasticsearchException { + return putIpLocationDatabase(fn.apply(new PutIpLocationDatabaseRequest.Builder()).build()); + } + // ----- Endpoint: ingest.put_pipeline /** diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java new file mode 100644 index 000000000..570f1b352 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseRequest.java @@ -0,0 +1,261 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.lang.String; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: ingest.get_ip_location_database.Request + +/** + * Returns information about one or more IP location database configurations. 
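// Illustrative sketch, not part of the generated patch: building the request object directly
// when the lambda form is not convenient; the id and timeout values are placeholders.
GetIpLocationDatabaseRequest request = GetIpLocationDatabaseRequest.of(g -> g
    .id("my-ipinfo-db")
    .masterTimeout(t -> t.time("30s")));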
+ * + * @see API + * specification + */ + +public class GetIpLocationDatabaseRequest extends RequestBase { + private final List id; + + @Nullable + private final Time masterTimeout; + + // --------------------------------------------------------------------------------------------- + + private GetIpLocationDatabaseRequest(Builder builder) { + + this.id = ApiTypeHelper.unmodifiable(builder.id); + this.masterTimeout = builder.masterTimeout; + + } + + public static GetIpLocationDatabaseRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Comma-separated list of database configuration IDs to retrieve. Wildcard + * (*) expressions are supported. To get all database + * configurations, omit this parameter or use *. + *

+     * API name: {@code id}
+     */
+    public final List<String> id() {
+        return this.id;
+    }
+
+    /**
+     * Period to wait for a connection to the master node. If no response is
+     * received before the timeout expires, the request fails and returns an error.
+     * <p>

+     * API name: {@code master_timeout}
+     */
+    @Nullable
+    public final Time masterTimeout() {
+        return this.masterTimeout;
+    }
+
+    // ---------------------------------------------------------------------------------------------
+
+    /**
+     * Builder for {@link GetIpLocationDatabaseRequest}.
+     */
+
+    public static class Builder extends RequestBase.AbstractBuilder<Builder>
+            implements
+                ObjectBuilder<GetIpLocationDatabaseRequest> {
+        @Nullable
+        private List<String> id;
+
+        @Nullable
+        private Time masterTimeout;
+
+        /**
+         * Comma-separated list of database configuration IDs to retrieve. Wildcard
+         * (<code>*</code>) expressions are supported. To get all database
+         * configurations, omit this parameter or use <code>*</code>.
+         * <p>

+         * API name: {@code id}
+         * <p>

+         * Adds all elements of <code>list</code> to <code>id</code>.
+         */
+        public final Builder id(List<String> list) {
+            this.id = _listAddAll(this.id, list);
+            return this;
+        }
+
+        /**
+         * Comma-separated list of database configuration IDs to retrieve. Wildcard
+         * (<code>*</code>) expressions are supported. To get all database
+         * configurations, omit this parameter or use <code>*</code>.
+         * <p>

+         * API name: {@code id}
+         * <p>

+         * Adds one or more values to <code>id</code>.
+         */
+        public final Builder id(String value, String... values) {
+            this.id = _listAdd(this.id, value, values);
+            return this;
+        }
+
+        /**
+         * Period to wait for a connection to the master node. If no response is
+         * received before the timeout expires, the request fails and returns an error.
+         * <p>

+         * API name: {@code master_timeout}
+         */
+        public final Builder masterTimeout(@Nullable Time value) {
+            this.masterTimeout = value;
+            return this;
+        }
+
+        /**
+         * Period to wait for a connection to the master node. If no response is
+         * received before the timeout expires, the request fails and returns an error.
+         * <p>

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link GetIpLocationDatabaseRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public GetIpLocationDatabaseRequest build() { + _checkSingleUse(); + + return new GetIpLocationDatabaseRequest(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code ingest.get_ip_location_database}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/ingest.get_ip_location_database", + + // Request method + request -> { + return "GET"; + + }, + + // Request path + request -> { + final int _id = 1 << 0; + + int propsSet = 0; + + if (ApiTypeHelper.isDefined(request.id())) + propsSet |= _id; + + if (propsSet == 0) { + StringBuilder buf = new StringBuilder(); + buf.append("/_ingest"); + buf.append("/ip_location"); + buf.append("/database"); + return buf.toString(); + } + if (propsSet == (_id)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_ingest"); + buf.append("/ip_location"); + buf.append("/database"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.id.stream().map(v -> v).collect(Collectors.joining(",")), buf); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _id = 1 << 0; + + int propsSet = 0; + + if (ApiTypeHelper.isDefined(request.id())) + propsSet |= _id; + + if (propsSet == 0) { + } + if (propsSet == (_id)) { + params.put("id", request.id.stream().map(v -> v).collect(Collectors.joining(","))); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + return params; + + }, SimpleEndpoint.emptyMap(), false, GetIpLocationDatabaseResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/health_report/FileSettingsIndicatorDetails.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseResponse.java similarity index 53% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/core/health_report/FileSettingsIndicatorDetails.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseResponse.java index dd7f978ba..ccc8d9f58 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/health_report/FileSettingsIndicatorDetails.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/GetIpLocationDatabaseResponse.java @@ -17,8 +17,9 @@ * under the License. 
*/ -package co.elastic.clients.elasticsearch.core.health_report; +package co.elastic.clients.elasticsearch.ingest; +import co.elastic.clients.elasticsearch.ingest.get_ip_location_database.DatabaseConfigurationMetadata; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -30,8 +31,7 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; -import java.lang.Long; -import java.lang.String; +import java.util.List; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -51,45 +51,35 @@ // //---------------------------------------------------------------- -// typedef: _global.health_report.FileSettingsIndicatorDetails +// typedef: ingest.get_ip_location_database.Response /** * * @see API + * "../doc-files/api-spec.html#ingest.get_ip_location_database.Response">API * specification */ @JsonpDeserializable -public class FileSettingsIndicatorDetails implements JsonpSerializable { - private final long failureStreak; - - private final String mostRecentFailure; +public class GetIpLocationDatabaseResponse implements JsonpSerializable { + private final List databases; // --------------------------------------------------------------------------------------------- - private FileSettingsIndicatorDetails(Builder builder) { + private GetIpLocationDatabaseResponse(Builder builder) { - this.failureStreak = ApiTypeHelper.requireNonNull(builder.failureStreak, this, "failureStreak"); - this.mostRecentFailure = ApiTypeHelper.requireNonNull(builder.mostRecentFailure, this, "mostRecentFailure"); + this.databases = ApiTypeHelper.unmodifiableRequired(builder.databases, this, "databases"); } - public static FileSettingsIndicatorDetails of(Function> fn) { + public static GetIpLocationDatabaseResponse of(Function> fn) { return fn.apply(new Builder()).build(); } /** - * Required - API name: {@code failure_streak} - */ - public final long failureStreak() { - return this.failureStreak; - } - - /** - * Required - API name: {@code most_recent_failure} + * Required - API name: {@code databases} */ - public final String mostRecentFailure() { - return this.mostRecentFailure; + public final List databases() { + return this.databases; } /** @@ -103,11 +93,16 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - generator.writeKey("failure_streak"); - generator.write(this.failureStreak); + if (ApiTypeHelper.isDefined(this.databases)) { + generator.writeKey("databases"); + generator.writeStartArray(); + for (DatabaseConfigurationMetadata item0 : this.databases) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); - generator.writeKey("most_recent_failure"); - generator.write(this.mostRecentFailure); + } } @@ -119,63 +114,75 @@ public String toString() { // --------------------------------------------------------------------------------------------- /** - * Builder for {@link FileSettingsIndicatorDetails}. + * Builder for {@link GetIpLocationDatabaseResponse}. */ public static class Builder extends WithJsonObjectBuilderBase implements - ObjectBuilder { - private Long failureStreak; - - private String mostRecentFailure; + ObjectBuilder { + private List databases; /** - * Required - API name: {@code failure_streak} + * Required - API name: {@code databases} + *

+         * Adds all elements of <code>list</code> to <code>databases</code>.
          */
-        public final Builder failureStreak(long value) {
-            this.failureStreak = value;
+        public final Builder databases(List<DatabaseConfigurationMetadata> list) {
+            this.databases = _listAddAll(this.databases, list);
             return this;
         }
 
         /**
-         * Required - API name: {@code most_recent_failure}
+         * Required - API name: {@code databases}
+         * <p>

+         * Adds one or more values to <code>databases</code>.
          */
-        public final Builder mostRecentFailure(String value) {
-            this.mostRecentFailure = value;
+        public final Builder databases(DatabaseConfigurationMetadata value, DatabaseConfigurationMetadata... values) {
+            this.databases = _listAdd(this.databases, value, values);
             return this;
         }
 
+        /**
+         * Required - API name: {@code databases}
+         * <p>

+ * Adds a value to databases using a builder lambda. + */ + public final Builder databases( + Function> fn) { + return databases(fn.apply(new DatabaseConfigurationMetadata.Builder()).build()); + } + @Override protected Builder self() { return this; } /** - * Builds a {@link FileSettingsIndicatorDetails}. + * Builds a {@link GetIpLocationDatabaseResponse}. * * @throws NullPointerException * if some of the required fields are null. */ - public FileSettingsIndicatorDetails build() { + public GetIpLocationDatabaseResponse build() { _checkSingleUse(); - return new FileSettingsIndicatorDetails(this); + return new GetIpLocationDatabaseResponse(this); } } // --------------------------------------------------------------------------------------------- /** - * Json deserializer for {@link FileSettingsIndicatorDetails} + * Json deserializer for {@link GetIpLocationDatabaseResponse} */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer - .lazy(Builder::new, FileSettingsIndicatorDetails::setupFileSettingsIndicatorDetailsDeserializer); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, GetIpLocationDatabaseResponse::setupGetIpLocationDatabaseResponseDeserializer); - protected static void setupFileSettingsIndicatorDetailsDeserializer( - ObjectDeserializer op) { + protected static void setupGetIpLocationDatabaseResponseDeserializer( + ObjectDeserializer op) { - op.add(Builder::failureStreak, JsonpDeserializer.longDeserializer(), "failure_streak"); - op.add(Builder::mostRecentFailure, JsonpDeserializer.stringDeserializer(), "most_recent_failure"); + op.add(Builder::databases, JsonpDeserializer.arrayDeserializer(DatabaseConfigurationMetadata._DESERIALIZER), + "databases"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Ipinfo.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Ipinfo.java new file mode 100644 index 000000000..85c4569c9 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Ipinfo.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class Ipinfo implements DatabaseConfigurationFullVariant, DatabaseConfigurationVariant, JsonpSerializable { + + /** + * DatabaseConfigurationFull variant kind. + */ + @Override + public DatabaseConfigurationFull.Kind _databaseConfigurationFullKind() { + return DatabaseConfigurationFull.Kind.Ipinfo; + } + + /** + * DatabaseConfiguration variant kind. + */ + @Override + public DatabaseConfiguration.Kind _databaseConfigurationKind() { + return DatabaseConfiguration.Kind.Ipinfo; + } + + public static final class Builder implements ObjectBuilder { + @Override + public Ipinfo build() { + return Ipinfo._INSTANCE; + } + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + generator.writeEnd(); + } + + /** + * Singleton instance for empty class {@link Ipinfo}. + */ + public static final Ipinfo _INSTANCE = new Ipinfo(); + + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer.emptyObject(Ipinfo._INSTANCE); + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/help/HelpRecord.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Local.java similarity index 69% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/cat/help/HelpRecord.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Local.java index 5c193bb45..1f2cf6f1b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cat/help/HelpRecord.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Local.java @@ -17,7 +17,7 @@ * under the License. */ -package co.elastic.clients.elasticsearch.cat.help; +package co.elastic.clients.elasticsearch.ingest; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; @@ -50,34 +50,42 @@ // //---------------------------------------------------------------- -// typedef: cat.help.HelpRecord +// typedef: ingest._types.Local /** * - * @see API + * @see API * specification */ @JsonpDeserializable -public class HelpRecord implements JsonpSerializable { - private final String endpoint; +public class Local implements DatabaseConfigurationFullVariant, JsonpSerializable { + private final String type; // --------------------------------------------------------------------------------------------- - private HelpRecord(Builder builder) { + private Local(Builder builder) { - this.endpoint = ApiTypeHelper.requireNonNull(builder.endpoint, this, "endpoint"); + this.type = ApiTypeHelper.requireNonNull(builder.type, this, "type"); } - public static HelpRecord of(Function> fn) { + public static Local of(Function> fn) { return fn.apply(new Builder()).build(); } /** - * Required - API name: {@code endpoint} + * DatabaseConfigurationFull variant kind. 
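// Illustrative sketch, not part of the generated patch: Ipinfo is an empty variant object, so the
// shared singleton (or a fresh Builder) is all that is needed; Local only carries a type string,
// whose value here is a placeholder.
Ipinfo ipinfo = Ipinfo._INSTANCE;
Local local = Local.of(l -> l.type("GeoLite2-City"));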
*/ - public final String endpoint() { - return this.endpoint; + @Override + public DatabaseConfigurationFull.Kind _databaseConfigurationFullKind() { + return DatabaseConfigurationFull.Kind.Local; + } + + /** + * Required - API name: {@code type} + */ + public final String type() { + return this.type; } /** @@ -91,8 +99,8 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - generator.writeKey("endpoint"); - generator.write(this.endpoint); + generator.writeKey("type"); + generator.write(this.type); } @@ -104,17 +112,17 @@ public String toString() { // --------------------------------------------------------------------------------------------- /** - * Builder for {@link HelpRecord}. + * Builder for {@link Local}. */ - public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - private String endpoint; + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private String type; /** - * Required - API name: {@code endpoint} + * Required - API name: {@code type} */ - public final Builder endpoint(String value) { - this.endpoint = value; + public final Builder type(String value) { + this.type = value; return this; } @@ -124,29 +132,29 @@ protected Builder self() { } /** - * Builds a {@link HelpRecord}. + * Builds a {@link Local}. * * @throws NullPointerException * if some of the required fields are null. */ - public HelpRecord build() { + public Local build() { _checkSingleUse(); - return new HelpRecord(this); + return new Local(this); } } // --------------------------------------------------------------------------------------------- /** - * Json deserializer for {@link HelpRecord} + * Json deserializer for {@link Local} */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, - HelpRecord::setupHelpRecordDeserializer); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + Local::setupLocalDeserializer); - protected static void setupHelpRecordDeserializer(ObjectDeserializer op) { + protected static void setupLocalDeserializer(ObjectDeserializer op) { - op.add(Builder::endpoint, JsonpDeserializer.stringDeserializer(), "endpoint"); + op.add(Builder::type, JsonpDeserializer.stringDeserializer(), "type"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Maxmind.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Maxmind.java index 702572886..ac93cf0c4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Maxmind.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Maxmind.java @@ -58,7 +58,7 @@ * specification */ @JsonpDeserializable -public class Maxmind implements JsonpSerializable { +public class Maxmind implements DatabaseConfigurationFullVariant, DatabaseConfigurationVariant, JsonpSerializable { private final String accountId; // --------------------------------------------------------------------------------------------- @@ -73,6 +73,22 @@ public static Maxmind of(Function> fn) { return fn.apply(new Builder()).build(); } + /** + * DatabaseConfigurationFull variant kind. + */ + @Override + public DatabaseConfigurationFull.Kind _databaseConfigurationFullKind() { + return DatabaseConfigurationFull.Kind.Maxmind; + } + + /** + * DatabaseConfiguration variant kind. 
+ */ + @Override + public DatabaseConfiguration.Kind _databaseConfigurationKind() { + return DatabaseConfiguration.Kind.Maxmind; + } + /** * Required - API name: {@code account_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java new file mode 100644 index 000000000..141b98cd9 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseRequest.java @@ -0,0 +1,326 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.elasticsearch._types.Time; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.transport.Endpoint; +import co.elastic.clients.transport.endpoints.SimpleEndpoint; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import jakarta.json.stream.JsonParser; +import java.lang.String; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: ingest.put_ip_location_database.Request + +/** + * Create or update an IP geolocation database configuration.
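For orientation, a minimal usage sketch for this request. An ElasticsearchClient named client, a putIpLocationDatabase method on the ingest namespace client, and the name()/maxmind() setters on the DatabaseConfiguration builder are all assumptions here; they are not shown in this hunk.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.ingest.PutIpLocationDatabaseRequest;
import co.elastic.clients.elasticsearch.ingest.PutIpLocationDatabaseResponse;
import java.io.IOException;

public class PutIpLocationDatabaseSketch {

    // Builds the request and sends it through the ingest namespace client.
    // Only id, masterTimeout and configuration(...) are visible in this file;
    // the rest is assumed from the surrounding change.
    static PutIpLocationDatabaseResponse storeMaxmindDatabase(ElasticsearchClient client)
            throws IOException {
        PutIpLocationDatabaseRequest request = PutIpLocationDatabaseRequest.of(r -> r
                .id("my-ip-database")                      // required path parameter
                .masterTimeout(t -> t.time("30s"))         // optional query parameter
                .configuration(c -> c                      // request body
                        .name("GeoIP2-City")
                        .maxmind(m -> m.accountId("1234567"))));
        return client.ingest().putIpLocationDatabase(request);
    }
}

The configuration value is written directly as the request body, which matches the serialize() implementation further down in this class.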
+ * + * @see API + * specification + */ +@JsonpDeserializable +public class PutIpLocationDatabaseRequest extends RequestBase implements JsonpSerializable { + private final String id; + + @Nullable + private final Time masterTimeout; + + @Nullable + private final Time timeout; + + private final DatabaseConfiguration configuration; + + // --------------------------------------------------------------------------------------------- + + private PutIpLocationDatabaseRequest(Builder builder) { + + this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); + this.masterTimeout = builder.masterTimeout; + this.timeout = builder.timeout; + this.configuration = ApiTypeHelper.requireNonNull(builder.configuration, this, "configuration"); + + } + + public static PutIpLocationDatabaseRequest of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - ID of the database configuration to create or update. + *

+ * API name: {@code id} + */ + public final String id() { + return this.id; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

+ * API name: {@code master_timeout} + */ + @Nullable + public final Time masterTimeout() { + return this.masterTimeout; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

+ * API name: {@code timeout} + */ + @Nullable + public final Time timeout() { + return this.timeout; + } + + /** + * Required - Request body. + */ + public final DatabaseConfiguration configuration() { + return this.configuration; + } + + /** + * Serialize this value to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + this.configuration.serialize(generator, mapper); + + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link PutIpLocationDatabaseRequest}. + */ + + public static class Builder extends RequestBase.AbstractBuilder + implements + ObjectBuilder { + private String id; + + @Nullable + private Time masterTimeout; + + @Nullable + private Time timeout; + + private DatabaseConfiguration configuration; + + /** + * Required - ID of the database configuration to create or update. + *

+ * API name: {@code id} + */ + public final Builder id(String value) { + this.id = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(@Nullable Time value) { + this.masterTimeout = value; + return this; + } + + /** + * Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. + *

+ * API name: {@code master_timeout} + */ + public final Builder masterTimeout(Function> fn) { + return this.masterTimeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

+ * API name: {@code timeout} + */ + public final Builder timeout(@Nullable Time value) { + this.timeout = value; + return this; + } + + /** + * Period to wait for a response. If no response is received before the timeout + * expires, the request fails and returns an error. + *

+ * API name: {@code timeout} + */ + public final Builder timeout(Function> fn) { + return this.timeout(fn.apply(new Time.Builder()).build()); + } + + /** + * Required - Request body. + */ + public final Builder configuration(DatabaseConfiguration value) { + this.configuration = value; + return this; + } + + /** + * Required - Request body. + */ + public final Builder configuration( + Function> fn) { + return this.configuration(fn.apply(new DatabaseConfiguration.Builder()).build()); + } + + @Override + public Builder withJson(JsonParser parser, JsonpMapper mapper) { + + @SuppressWarnings("unchecked") + DatabaseConfiguration value = (DatabaseConfiguration) DatabaseConfiguration._DESERIALIZER + .deserialize(parser, mapper); + return this.configuration(value); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link PutIpLocationDatabaseRequest}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public PutIpLocationDatabaseRequest build() { + _checkSingleUse(); + + return new PutIpLocationDatabaseRequest(this); + } + } + + public static final JsonpDeserializer _DESERIALIZER = createPutIpLocationDatabaseRequestDeserializer(); + protected static JsonpDeserializer createPutIpLocationDatabaseRequestDeserializer() { + + JsonpDeserializer valueDeserializer = DatabaseConfiguration._DESERIALIZER; + + return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() + .configuration(valueDeserializer.deserialize(parser, mapper, event)).build()); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Endpoint "{@code ingest.put_ip_location_database}". + */ + public static final Endpoint _ENDPOINT = new SimpleEndpoint<>( + "es/ingest.put_ip_location_database", + + // Request method + request -> { + return "PUT"; + + }, + + // Request path + request -> { + final int _id = 1 << 0; + + int propsSet = 0; + + propsSet |= _id; + + if (propsSet == (_id)) { + StringBuilder buf = new StringBuilder(); + buf.append("/_ingest"); + buf.append("/ip_location"); + buf.append("/database"); + buf.append("/"); + SimpleEndpoint.pathEncode(request.id, buf); + return buf.toString(); + } + throw SimpleEndpoint.noPathTemplateFound("path"); + + }, + + // Path parameters + request -> { + Map params = new HashMap<>(); + final int _id = 1 << 0; + + int propsSet = 0; + + propsSet |= _id; + + if (propsSet == (_id)) { + params.put("id", request.id); + } + return params; + }, + + // Request parameters + request -> { + Map params = new HashMap<>(); + if (request.masterTimeout != null) { + params.put("master_timeout", request.masterTimeout._toJsonString()); + } + if (request.timeout != null) { + params.put("timeout", request.timeout._toJsonString()); + } + return params; + + }, SimpleEndpoint.emptyMap(), true, PutIpLocationDatabaseResponse._DESERIALIZER); +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseResponse.java new file mode 100644 index 000000000..b679ae5a2 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/PutIpLocationDatabaseResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest; + +import co.elastic.clients.elasticsearch._types.AcknowledgedResponseBase; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; +import java.util.Objects; +import java.util.function.Function; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: ingest.put_ip_location_database.Response + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class PutIpLocationDatabaseResponse extends AcknowledgedResponseBase { + // --------------------------------------------------------------------------------------------- + + private PutIpLocationDatabaseResponse(Builder builder) { + super(builder); + + } + + public static PutIpLocationDatabaseResponse of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link PutIpLocationDatabaseResponse}. + */ + + public static class Builder extends AcknowledgedResponseBase.AbstractBuilder + implements + ObjectBuilder { + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link PutIpLocationDatabaseResponse}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public PutIpLocationDatabaseResponse build() { + _checkSingleUse(); + + return new PutIpLocationDatabaseResponse(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link PutIpLocationDatabaseResponse} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, PutIpLocationDatabaseResponse::setupPutIpLocationDatabaseResponseDeserializer); + + protected static void setupPutIpLocationDatabaseResponseDeserializer( + ObjectDeserializer op) { + AcknowledgedResponseBase.setupAcknowledgedResponseBaseDeserializer(op); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorElementType.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Web.java similarity index 60% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorElementType.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Web.java index 15678bbd4..9e5a42ebe 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/_types/mapping/DenseVectorElementType.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/Web.java @@ -17,11 +17,14 @@ * under the License. */ -package co.elastic.clients.elasticsearch._types.mapping; +package co.elastic.clients.elasticsearch.ingest; -import co.elastic.clients.json.JsonEnum; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.util.ObjectBuilder; +import jakarta.json.stream.JsonGenerator; //---------------------------------------------------------------- // THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. @@ -40,43 +43,40 @@ /** * - * @see API + * @see API * specification */ @JsonpDeserializable -public enum DenseVectorElementType implements JsonEnum { +public class Web implements DatabaseConfigurationFullVariant, JsonpSerializable { + /** - * Indexes a single bit per dimension. Useful for very high-dimensional vectors - * or models that specifically support bit vectors. - *

- * NOTE: when using bit, the number of dimensions must be a - * multiple of 8 and must represent the number of bits. + * DatabaseConfigurationFull variant kind. */ - Bit("bit"), + @Override + public DatabaseConfigurationFull.Kind _databaseConfigurationFullKind() { + return DatabaseConfigurationFull.Kind.Web; + } + + public static final class Builder implements ObjectBuilder { + @Override + public Web build() { + return Web._INSTANCE; + } + } /** - * Indexes a 1-byte integer value per dimension. + * Serialize this object to JSON. */ - Byte("byte"), + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + generator.writeEnd(); + } /** - * Indexes a 4-byte floating-point value per dimension. + * Singleton instance for empty class {@link Web}. */ - Float("float"), + public static final Web _INSTANCE = new Web(); - ; - - private final String jsonValue; - - DenseVectorElementType(String jsonValue) { - this.jsonValue = jsonValue; - } - - public String jsonValue() { - return this.jsonValue; - } + public static final JsonpDeserializer _DESERIALIZER = JsonpDeserializer.emptyObject(Web._INSTANCE); - public static final JsonEnum.Deserializer _DESERIALIZER = new JsonEnum.Deserializer<>( - DenseVectorElementType.values()); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/get_ip_location_database/DatabaseConfigurationMetadata.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/get_ip_location_database/DatabaseConfigurationMetadata.java new file mode 100644 index 000000000..4dc9b6398 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ingest/get_ip_location_database/DatabaseConfigurationMetadata.java @@ -0,0 +1,273 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ingest.get_ip_location_database; + +import co.elastic.clients.elasticsearch.ingest.DatabaseConfigurationFull; +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Long; +import java.lang.String; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. 
+//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. +// +//---------------------------------------------------------------- + +// typedef: ingest.get_ip_location_database.DatabaseConfigurationMetadata + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DatabaseConfigurationMetadata implements JsonpSerializable { + private final String id; + + private final long version; + + @Nullable + private final Long modifiedDateMillis; + + @Nullable + private final Long modifiedDate; + + private final DatabaseConfigurationFull database; + + // --------------------------------------------------------------------------------------------- + + private DatabaseConfigurationMetadata(Builder builder) { + + this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); + this.version = ApiTypeHelper.requireNonNull(builder.version, this, "version"); + this.modifiedDateMillis = builder.modifiedDateMillis; + this.modifiedDate = builder.modifiedDate; + this.database = ApiTypeHelper.requireNonNull(builder.database, this, "database"); + + } + + public static DatabaseConfigurationMetadata of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code id} + */ + public final String id() { + return this.id; + } + + /** + * Required - API name: {@code version} + */ + public final long version() { + return this.version; + } + + /** + * API name: {@code modified_date_millis} + */ + @Nullable + public final Long modifiedDateMillis() { + return this.modifiedDateMillis; + } + + /** + * API name: {@code modified_date} + */ + @Nullable + public final Long modifiedDate() { + return this.modifiedDate; + } + + /** + * Required - API name: {@code database} + */ + public final DatabaseConfigurationFull database() { + return this.database; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("id"); + generator.write(this.id); + + generator.writeKey("version"); + generator.write(this.version); + + if (this.modifiedDateMillis != null) { + generator.writeKey("modified_date_millis"); + generator.write(this.modifiedDateMillis); + + } + if (this.modifiedDate != null) { + generator.writeKey("modified_date"); + generator.write(this.modifiedDate); + + } + generator.writeKey("database"); + this.database.serialize(generator, mapper); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DatabaseConfigurationMetadata}. 
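A sketch of consuming these metadata entries on the read side, assuming a getIpLocationDatabase method on the ingest client and the usual tagged-union accessors (maxmind() and friends) on DatabaseConfigurationFull; neither appears in this file.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.ingest.DatabaseConfigurationFull;
import co.elastic.clients.elasticsearch.ingest.get_ip_location_database.DatabaseConfigurationMetadata;
import java.io.IOException;

public class ListIpLocationDatabasesSketch {

    // Walks the databases() list of the get response and branches on the
    // variant kind introduced by this change.
    static void printDatabases(ElasticsearchClient client) throws IOException {
        for (DatabaseConfigurationMetadata meta : client.ingest().getIpLocationDatabase().databases()) {
            DatabaseConfigurationFull db = meta.database();
            switch (db._databaseConfigurationFullKind()) {
                case Maxmind :
                    System.out.println(meta.id() + " -> maxmind account " + db.maxmind().accountId());
                    break;
                case Ipinfo :
                case Local :
                case Web :
                    System.out.println(meta.id() + " -> " + db._databaseConfigurationFullKind());
                    break;
                default :
                    System.out.println(meta.id() + " -> unrecognized provider");
            }
        }
    }
}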
+ */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String id; + + private Long version; + + @Nullable + private Long modifiedDateMillis; + + @Nullable + private Long modifiedDate; + + private DatabaseConfigurationFull database; + + /** + * Required - API name: {@code id} + */ + public final Builder id(String value) { + this.id = value; + return this; + } + + /** + * Required - API name: {@code version} + */ + public final Builder version(long value) { + this.version = value; + return this; + } + + /** + * API name: {@code modified_date_millis} + */ + public final Builder modifiedDateMillis(@Nullable Long value) { + this.modifiedDateMillis = value; + return this; + } + + /** + * API name: {@code modified_date} + */ + public final Builder modifiedDate(@Nullable Long value) { + this.modifiedDate = value; + return this; + } + + /** + * Required - API name: {@code database} + */ + public final Builder database(DatabaseConfigurationFull value) { + this.database = value; + return this; + } + + /** + * Required - API name: {@code database} + */ + public final Builder database( + Function> fn) { + return this.database(fn.apply(new DatabaseConfigurationFull.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DatabaseConfigurationMetadata}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DatabaseConfigurationMetadata build() { + _checkSingleUse(); + + return new DatabaseConfigurationMetadata(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DatabaseConfigurationMetadata} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DatabaseConfigurationMetadata::setupDatabaseConfigurationMetadataDeserializer); + + protected static void setupDatabaseConfigurationMetadataDeserializer( + ObjectDeserializer op) { + + op.add(Builder::id, JsonpDeserializer.stringDeserializer(), "id"); + op.add(Builder::version, JsonpDeserializer.longDeserializer(), "version"); + op.add(Builder::modifiedDateMillis, JsonpDeserializer.longDeserializer(), "modified_date_millis"); + op.add(Builder::modifiedDate, JsonpDeserializer.longDeserializer(), "modified_date"); + op.add(Builder::database, DatabaseConfigurationFull._DESERIALIZER, "database"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java index ae6b4a3c0..ffa231af7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java @@ -58,9 +58,7 @@ // typedef: logstash.delete_pipeline.Request /** - * Delete a Logstash pipeline. - *

- * Delete a pipeline that is used for Logstash Central Management. + * Deletes a pipeline used for Logstash Central Management. * * @see API @@ -83,7 +81,7 @@ public static DeletePipelineRequest of(Function * API name: {@code id} */ @@ -103,7 +101,7 @@ public static class Builder extends RequestBase.AbstractBuilder private String id; /** - * Required - An identifier for the pipeline. + * Required - Identifier for the pipeline. *

* API name: {@code id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java index 772cd4613..af70c837a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java @@ -71,9 +71,7 @@ public ElasticsearchLogstashAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: logstash.delete_pipeline /** - * Delete a Logstash pipeline. - *

- * Delete a pipeline that is used for Logstash Central Management. + * Deletes a pipeline used for Logstash Central Management. * * @see Documentation @@ -88,9 +86,7 @@ public CompletableFuture deletePipeline(DeletePipelineRequest r } /** - * Delete a Logstash pipeline. - *

- * Delete a pipeline that is used for Logstash Central Management. + * Deletes a pipeline used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -108,9 +104,7 @@ public final CompletableFuture deletePipeline( // ----- Endpoint: logstash.get_pipeline /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @see Documentation @@ -125,9 +119,7 @@ public CompletableFuture getPipeline(GetPipelineRequest req } /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -143,9 +135,7 @@ public final CompletableFuture getPipeline( } /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @see Documentation @@ -160,10 +150,7 @@ public CompletableFuture getPipeline() { // ----- Endpoint: logstash.put_pipeline /** - * Create or update a Logstash pipeline. - *

- * Create a pipeline that is used for Logstash Central Management. If the - * specified pipeline exists, it is replaced. + * Creates or updates a pipeline used for Logstash Central Management. * * @see Documentation @@ -178,10 +165,7 @@ public CompletableFuture putPipeline(PutPipelineRequest request } /** - * Create or update a Logstash pipeline. - *

- * Create a pipeline that is used for Logstash Central Management. If the - * specified pipeline exists, it is replaced. + * Creates or updates a pipeline used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java index 874f6cdd7..d0fa36a78 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java @@ -69,9 +69,7 @@ public ElasticsearchLogstashClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: logstash.delete_pipeline /** - * Delete a Logstash pipeline. - *

- * Delete a pipeline that is used for Logstash Central Management. + * Deletes a pipeline used for Logstash Central Management. * * @see Documentation @@ -86,9 +84,7 @@ public BooleanResponse deletePipeline(DeletePipelineRequest request) throws IOEx } /** - * Delete a Logstash pipeline. - *

- * Delete a pipeline that is used for Logstash Central Management. + * Deletes a pipeline used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -107,9 +103,7 @@ public final BooleanResponse deletePipeline( // ----- Endpoint: logstash.get_pipeline /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @see Documentation @@ -124,9 +118,7 @@ public GetPipelineResponse getPipeline(GetPipelineRequest request) throws IOExce } /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -143,9 +135,7 @@ public final GetPipelineResponse getPipeline( } /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @see Documentation @@ -160,10 +150,7 @@ public GetPipelineResponse getPipeline() throws IOException, ElasticsearchExcept // ----- Endpoint: logstash.put_pipeline /** - * Create or update a Logstash pipeline. - *

- * Create a pipeline that is used for Logstash Central Management. If the - * specified pipeline exists, it is replaced. + * Creates or updates a pipeline used for Logstash Central Management. * * @see Documentation @@ -178,10 +165,7 @@ public BooleanResponse putPipeline(PutPipelineRequest request) throws IOExceptio } /** - * Create or update a Logstash pipeline. - *

- * Create a pipeline that is used for Logstash Central Management. If the - * specified pipeline exists, it is replaced. + * Creates or updates a pipeline used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java index b3e608143..bedf522d9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java @@ -58,9 +58,7 @@ // typedef: logstash.get_pipeline.Request /** - * Get Logstash pipelines. - *

- * Get pipelines that are used for Logstash Central Management. + * Retrieves pipelines used for Logstash Central Management. * * @see API * specification @@ -82,7 +80,7 @@ public static GetPipelineRequest of(Function * API name: {@code id} */ @@ -103,7 +101,7 @@ public static class Builder extends RequestBase.AbstractBuilder private List id; /** - * A comma-separated list of pipeline identifiers. + * Comma-separated list of pipeline identifiers. *

* API name: {@code id} *

@@ -115,7 +113,7 @@ public final Builder id(List list) { } /** - * A comma-separated list of pipeline identifiers. + * Comma-separated list of pipeline identifiers. *

* API name: {@code id} *
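Taken together with the Logstash client methods above, a fetch-then-delete sketch might look like this; result() as the accessor for the id-to-pipeline map is an assumption, since the response class is not part of this diff.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.logstash.GetPipelineResponse;
import co.elastic.clients.elasticsearch.logstash.Pipeline;
import java.io.IOException;
import java.util.Map;

public class LogstashPipelineLookupSketch {

    // Looks a pipeline up by id and removes it when present, using the
    // getPipeline(...) and deletePipeline(...) methods shown above.
    static void fetchThenDelete(ElasticsearchClient client, String pipelineId) throws IOException {
        GetPipelineResponse response = client.logstash().getPipeline(g -> g.id(pipelineId));
        for (Map.Entry<String, Pipeline> entry : response.result().entrySet()) {
            System.out.println(entry.getKey() + " was last changed by " + entry.getValue().username());
        }
        if (response.result().containsKey(pipelineId)) {
            client.logstash().deletePipeline(d -> d.id(pipelineId));
        }
    }
}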

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java index 3619ab418..069e7da6d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java @@ -64,24 +64,24 @@ public class Pipeline implements JsonpSerializable { private final DateTime lastModified; - private final String pipeline; - private final PipelineMetadata pipelineMetadata; - private final PipelineSettings pipelineSettings; - private final String username; + private final String pipeline; + + private final PipelineSettings pipelineSettings; + // --------------------------------------------------------------------------------------------- private Pipeline(Builder builder) { this.description = ApiTypeHelper.requireNonNull(builder.description, this, "description"); this.lastModified = ApiTypeHelper.requireNonNull(builder.lastModified, this, "lastModified"); - this.pipeline = ApiTypeHelper.requireNonNull(builder.pipeline, this, "pipeline"); this.pipelineMetadata = ApiTypeHelper.requireNonNull(builder.pipelineMetadata, this, "pipelineMetadata"); - this.pipelineSettings = ApiTypeHelper.requireNonNull(builder.pipelineSettings, this, "pipelineSettings"); this.username = ApiTypeHelper.requireNonNull(builder.username, this, "username"); + this.pipeline = ApiTypeHelper.requireNonNull(builder.pipeline, this, "pipeline"); + this.pipelineSettings = ApiTypeHelper.requireNonNull(builder.pipelineSettings, this, "pipelineSettings"); } @@ -90,7 +90,7 @@ public static Pipeline of(Function> fn) { } /** - * Required - A description of the pipeline. This description is not used by + * Required - Description of the pipeline. This description is not used by * Elasticsearch or Logstash. *

* API name: {@code description} @@ -100,7 +100,7 @@ public final String description() { } /** - * Required - The date the pipeline was last updated. It must be in the + * Required - Date the pipeline was last updated. Must be in the * yyyy-MM-dd'T'HH:mm:ss.SSSZZ strict_date_time format. *

* API name: {@code last_modified} @@ -110,41 +110,41 @@ public final DateTime lastModified() { } /** - * Required - The configuration for the pipeline. + * Required - Optional metadata about the pipeline. May have any contents. This + * metadata is not generated or used by Elasticsearch or Logstash. *

- * API name: {@code pipeline} + * API name: {@code pipeline_metadata} */ - public final String pipeline() { - return this.pipeline; + public final PipelineMetadata pipelineMetadata() { + return this.pipelineMetadata; } /** - * Required - Optional metadata about the pipeline, which can have any contents. - * This metadata is not generated or used by Elasticsearch or Logstash. + * Required - User who last updated the pipeline. *

- * API name: {@code pipeline_metadata} + * API name: {@code username} */ - public final PipelineMetadata pipelineMetadata() { - return this.pipelineMetadata; + public final String username() { + return this.username; } /** - * Required - Settings for the pipeline. It supports only flat keys in dot - * notation. + * Required - Configuration for the pipeline. *

- * API name: {@code pipeline_settings} + * API name: {@code pipeline} */ - public final PipelineSettings pipelineSettings() { - return this.pipelineSettings; + public final String pipeline() { + return this.pipeline; } /** - * Required - The user who last updated the pipeline. + * Required - Settings for the pipeline. Supports only flat keys in dot + * notation. *

- * API name: {@code username} + * API name: {@code pipeline_settings} */ - public final String username() { - return this.username; + public final PipelineSettings pipelineSettings() { + return this.pipelineSettings; } /** @@ -163,18 +163,18 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("last_modified"); this.lastModified.serialize(generator, mapper); - generator.writeKey("pipeline"); - generator.write(this.pipeline); - generator.writeKey("pipeline_metadata"); this.pipelineMetadata.serialize(generator, mapper); - generator.writeKey("pipeline_settings"); - this.pipelineSettings.serialize(generator, mapper); - generator.writeKey("username"); generator.write(this.username); + generator.writeKey("pipeline"); + generator.write(this.pipeline); + + generator.writeKey("pipeline_settings"); + this.pipelineSettings.serialize(generator, mapper); + } @Override @@ -193,16 +193,16 @@ public static class Builder extends WithJsonObjectBuilderBase implement private DateTime lastModified; - private String pipeline; - private PipelineMetadata pipelineMetadata; - private PipelineSettings pipelineSettings; - private String username; + private String pipeline; + + private PipelineSettings pipelineSettings; + /** - * Required - A description of the pipeline. This description is not used by + * Required - Description of the pipeline. This description is not used by * Elasticsearch or Logstash. *

* API name: {@code description} @@ -213,7 +213,7 @@ public final Builder description(String value) { } /** - * Required - The date the pipeline was last updated. It must be in the + * Required - Date the pipeline was last updated. Must be in the * yyyy-MM-dd'T'HH:mm:ss.SSSZZ strict_date_time format. *

* API name: {@code last_modified} @@ -224,38 +224,48 @@ public final Builder lastModified(DateTime value) { } /** - * Required - The configuration for the pipeline. + * Required - Optional metadata about the pipeline. May have any contents. This + * metadata is not generated or used by Elasticsearch or Logstash. *

- * API name: {@code pipeline} + * API name: {@code pipeline_metadata} */ - public final Builder pipeline(String value) { - this.pipeline = value; + public final Builder pipelineMetadata(PipelineMetadata value) { + this.pipelineMetadata = value; return this; } /** - * Required - Optional metadata about the pipeline, which can have any contents. - * This metadata is not generated or used by Elasticsearch or Logstash. + * Required - Optional metadata about the pipeline. May have any contents. This + * metadata is not generated or used by Elasticsearch or Logstash. *

* API name: {@code pipeline_metadata} */ - public final Builder pipelineMetadata(PipelineMetadata value) { - this.pipelineMetadata = value; + public final Builder pipelineMetadata(Function> fn) { + return this.pipelineMetadata(fn.apply(new PipelineMetadata.Builder()).build()); + } + + /** + * Required - User who last updated the pipeline. + *

+ * API name: {@code username} + */ + public final Builder username(String value) { + this.username = value; return this; } /** - * Required - Optional metadata about the pipeline, which can have any contents. - * This metadata is not generated or used by Elasticsearch or Logstash. + * Required - Configuration for the pipeline. *

- * API name: {@code pipeline_metadata} + * API name: {@code pipeline} */ - public final Builder pipelineMetadata(Function> fn) { - return this.pipelineMetadata(fn.apply(new PipelineMetadata.Builder()).build()); + public final Builder pipeline(String value) { + this.pipeline = value; + return this; } /** - * Required - Settings for the pipeline. It supports only flat keys in dot + * Required - Settings for the pipeline. Supports only flat keys in dot * notation. *

* API name: {@code pipeline_settings} @@ -266,7 +276,7 @@ public final Builder pipelineSettings(PipelineSettings value) { } /** - * Required - Settings for the pipeline. It supports only flat keys in dot + * Required - Settings for the pipeline. Supports only flat keys in dot * notation. *

* API name: {@code pipeline_settings} @@ -275,16 +285,6 @@ public final Builder pipelineSettings(Function - * API name: {@code username} - */ - public final Builder username(String value) { - this.username = value; - return this; - } - @Override protected Builder self() { return this; @@ -315,10 +315,10 @@ protected static void setupPipelineDeserializer(ObjectDeserializer - * Create a pipeline that is used for Logstash Central Management. If the - * specified pipeline exists, it is replaced. + * Creates or updates a pipeline used for Logstash Central Management. * * @see API * specification @@ -89,7 +86,7 @@ public static PutPipelineRequest of(Function * API name: {@code id} */ @@ -126,7 +123,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Pipeline pipeline; /** - * Required - An identifier for the pipeline. + * Required - Identifier for the pipeline. *
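A sketch that exercises the reordered Pipeline builder above and stores the result with put_pipeline. The PipelineMetadata and PipelineSettings values are taken as parameters because their builders are not shown here, and DateTime.of(...) parsing an ISO-8601 string is also an assumption.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.logstash.Pipeline;
import co.elastic.clients.elasticsearch.logstash.PipelineMetadata;
import co.elastic.clients.elasticsearch.logstash.PipelineSettings;
import co.elastic.clients.util.DateTime;
import java.io.IOException;

public class PutLogstashPipelineSketch {

    // All six Pipeline properties are required; metadata and settings are
    // passed in prebuilt so that no field names outside this diff are invented.
    static void storePipeline(ElasticsearchClient client, String id,
            PipelineMetadata metadata, PipelineSettings settings) throws IOException {
        Pipeline pipeline = Pipeline.of(p -> p
                .description("Ingest apache access logs")
                .lastModified(DateTime.of("2024-01-01T00:00:00.000Z"))
                .pipelineMetadata(metadata)
                .username("elastic")
                .pipeline("input { stdin {} } output { elasticsearch {} }")
                .pipelineSettings(settings));
        client.logstash().putPipeline(r -> r.id(id).pipeline(pipeline));
    }
}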

* API name: {@code id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java index 394685c4c..852165c22 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java @@ -55,12 +55,9 @@ // typedef: migration.deprecations.Request /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java index 194c9aa01..a897b8d56 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java @@ -70,12 +70,9 @@ public ElasticsearchMigrationAsyncClient withTransportOptions(@Nullable Transpor // ----- Endpoint: migration.deprecations /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *
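As a usage sketch for the deprecations endpoint described above, using only the no-argument overload shown in this diff; the response accessors are not named because they are not part of this hunk, so the JSON toString() output is printed instead.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.migration.DeprecationsResponse;
import java.io.IOException;

public class DeprecationCheckSketch {

    // Fetches cluster, node and index level deprecation warnings and prints
    // the whole response as JSON.
    static void printDeprecations(ElasticsearchClient client) throws IOException {
        DeprecationsResponse response = client.migration().deprecations();
        System.out.println(response);
    }
}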

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @see Documentation @@ -90,12 +87,9 @@ public CompletableFuture deprecations(DeprecationsRequest } /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @param fn * a function that initializes a builder to create the @@ -111,12 +105,9 @@ public final CompletableFuture deprecations( } /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @see Documentation @@ -131,13 +122,7 @@ public CompletableFuture deprecations() { // ----- Endpoint: migration.get_feature_upgrade_status /** - * Get feature migration information. Version upgrades sometimes require changes - * to how features store configuration information and data in system indices. - * Check which features need to be migrated and the status of any migrations - * that are in progress. - *

- * TIP: This API is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Find out whether system features need to be upgraded or not * * @see Documentation @@ -151,15 +136,7 @@ public CompletableFuture getFeatureUpgradeStatu // ----- Endpoint: migration.post_feature_upgrade /** - * Start the feature migration. Version upgrades sometimes require changes to - * how features store configuration information and data in system indices. This - * API starts the automatic migration process. - *

- * Some functionality might be temporarily unavailable during the migration - * process. - *

- * TIP: The API is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Begin upgrades for system features * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java index b9e21bb84..0ec1c3ffd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java @@ -68,12 +68,9 @@ public ElasticsearchMigrationClient withTransportOptions(@Nullable TransportOpti // ----- Endpoint: migration.deprecations /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @see Documentation @@ -88,12 +85,9 @@ public DeprecationsResponse deprecations(DeprecationsRequest request) throws IOE } /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @param fn * a function that initializes a builder to create the @@ -110,12 +104,9 @@ public final DeprecationsResponse deprecations( } /** - * Get deprecation information. Get information about different cluster, node, - * and index level settings that use deprecated features that will be removed or - * changed in the next major version. - *

- * TIP: This APIs is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Retrieves information about different cluster, node, and index level settings + * that use deprecated features that will be removed or changed in the next + * major version. * * @see Documentation @@ -130,13 +121,7 @@ public DeprecationsResponse deprecations() throws IOException, ElasticsearchExce // ----- Endpoint: migration.get_feature_upgrade_status /** - * Get feature migration information. Version upgrades sometimes require changes - * to how features store configuration information and data in system indices. - * Check which features need to be migrated and the status of any migrations - * that are in progress. - *

- * TIP: This API is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Find out whether system features need to be upgraded or not * * @see Documentation @@ -150,15 +135,7 @@ public GetFeatureUpgradeStatusResponse getFeatureUpgradeStatus() throws IOExcept // ----- Endpoint: migration.post_feature_upgrade /** - * Start the feature migration. Version upgrades sometimes require changes to - * how features store configuration information and data in system indices. This - * API starts the automatic migration process. - *

- * Some functionality might be temporarily unavailable during the migration - * process. - *

- * TIP: The API is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Begin upgrades for system features * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java index f9d0cf485..27411dc69 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java @@ -50,13 +50,7 @@ // typedef: migration.get_feature_upgrade_status.Request /** - * Get feature migration information. Version upgrades sometimes require changes - * to how features store configuration information and data in system indices. - * Check which features need to be migrated and the status of any migrations - * that are in progress. - *
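A sketch combining the two feature-migration endpoints above. The migrationStatus() accessor, its jsonValue(), the NO_MIGRATION_NEEDED constant and accepted() are assumptions, since the response classes are not part of this diff.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.migration.GetFeatureUpgradeStatusResponse;
import co.elastic.clients.elasticsearch.migration.PostFeatureUpgradeResponse;
import java.io.IOException;

public class FeatureUpgradeSketch {

    // Checks whether system features need to be migrated and only then starts
    // the migration via post_feature_upgrade.
    static void upgradeIfNeeded(ElasticsearchClient client) throws IOException {
        GetFeatureUpgradeStatusResponse status = client.migration().getFeatureUpgradeStatus();
        if (!"NO_MIGRATION_NEEDED".equals(status.migrationStatus().jsonValue())) {
            PostFeatureUpgradeResponse started = client.migration().postFeatureUpgrade();
            System.out.println("feature migration accepted: " + started.accepted());
        }
    }
}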

- * TIP: This API is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Find out whether system features need to be upgraded or not * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java index 95e329319..c016b79f8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java @@ -50,15 +50,7 @@ // typedef: migration.post_feature_upgrade.Request /** - * Start the feature migration. Version upgrades sometimes require changes to - * how features store configuration information and data in system indices. This - * API starts the automatic migration process. - *

- * Some functionality might be temporarily unavailable during the migration - * process. - *

- * TIP: The API is designed for indirect use by the Upgrade Assistant. We - * strongly recommend you use the Upgrade Assistant. + * Begin upgrades for system features * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CalendarEvent.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CalendarEvent.java index 20c86c0ae..ebea147de 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CalendarEvent.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CalendarEvent.java @@ -31,8 +31,6 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; -import java.lang.Boolean; -import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.function.Function; @@ -74,15 +72,6 @@ public class CalendarEvent implements JsonpSerializable { private final DateTime startTime; - @Nullable - private final Boolean skipResult; - - @Nullable - private final Boolean skipModelUpdate; - - @Nullable - private final Integer forceTimeShift; - // --------------------------------------------------------------------------------------------- private CalendarEvent(Builder builder) { @@ -92,9 +81,6 @@ private CalendarEvent(Builder builder) { this.description = ApiTypeHelper.requireNonNull(builder.description, this, "description"); this.endTime = ApiTypeHelper.requireNonNull(builder.endTime, this, "endTime"); this.startTime = ApiTypeHelper.requireNonNull(builder.startTime, this, "startTime"); - this.skipResult = builder.skipResult; - this.skipModelUpdate = builder.skipModelUpdate; - this.forceTimeShift = builder.forceTimeShift; } @@ -149,37 +135,6 @@ public final DateTime startTime() { return this.startTime; } - /** - * When true the model will not create results for this calendar period. - *

- * API name: {@code skip_result} - */ - @Nullable - public final Boolean skipResult() { - return this.skipResult; - } - - /** - * When true the model will not be updated for this calendar period. - *

- * API name: {@code skip_model_update} - */ - @Nullable - public final Boolean skipModelUpdate() { - return this.skipModelUpdate; - } - - /** - * Shift time by this many seconds. For example adjust time for daylight savings - * changes - *

- * API name: {@code force_time_shift} - */ - @Nullable - public final Integer forceTimeShift() { - return this.forceTimeShift; - } - /** * Serialize this object to JSON. */ @@ -208,21 +163,6 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { this.endTime.serialize(generator, mapper); generator.writeKey("start_time"); this.startTime.serialize(generator, mapper); - if (this.skipResult != null) { - generator.writeKey("skip_result"); - generator.write(this.skipResult); - - } - if (this.skipModelUpdate != null) { - generator.writeKey("skip_model_update"); - generator.write(this.skipModelUpdate); - - } - if (this.forceTimeShift != null) { - generator.writeKey("force_time_shift"); - generator.write(this.forceTimeShift); - - } } @@ -250,15 +190,6 @@ public static class Builder extends WithJsonObjectBuilderBase implement private DateTime startTime; - @Nullable - private Boolean skipResult; - - @Nullable - private Boolean skipModelUpdate; - - @Nullable - private Integer forceTimeShift; - /** * A string that uniquely identifies a calendar. *

@@ -309,37 +240,6 @@ public final Builder startTime(DateTime value) { return this; } - /** - * When true the model will not create results for this calendar period. - *

- * API name: {@code skip_result} - */ - public final Builder skipResult(@Nullable Boolean value) { - this.skipResult = value; - return this; - } - - /** - * When true the model will not be updated for this calendar period. - *

- * API name: {@code skip_model_update} - */ - public final Builder skipModelUpdate(@Nullable Boolean value) { - this.skipModelUpdate = value; - return this; - } - - /** - * Shift time by this many seconds. For example adjust time for daylight savings - * changes - *

- * API name: {@code force_time_shift} - */ - public final Builder forceTimeShift(@Nullable Integer value) { - this.forceTimeShift = value; - return this; - } - @Override protected Builder self() { return this; @@ -373,9 +273,6 @@ protected static void setupCalendarEventDeserializer(ObjectDeserializer inferTrainedModel( // ----- Endpoint: ml.info /** - * Get machine learning information. Get defaults and limits used by machine + * Return ML defaults and limits. Returns defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be @@ -2236,14 +2236,9 @@ public final CompletableFuture putDataFrameAnalyt * an anomaly detection job. You can associate only one datafeed with each * anomaly detection job. The datafeed contains a query that runs at a defined * interval (frequency). If you are concerned about delayed data, - * you can add a delay (query_delay) at each interval. When - * Elasticsearch security features are enabled, your datafeed remembers which - * roles the user who created it had at the time of creation and runs the query - * using those same roles. If you provide secondary authorization headers, those - * credentials are used instead. You must use Kibana, this API, or the create - * anomaly detection jobs API to create a datafeed. Do not add a datafeed - * directly to the .ml-config index. Do not give users - * write privileges on the .ml-config index. + * you can add a delay + * (query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-configindex. Do not give userswriteprivileges on the.ml-config` + * index. * * @see Documentation @@ -2262,14 +2257,9 @@ public CompletableFuture putDatafeed(PutDatafeedRequest req * an anomaly detection job. You can associate only one datafeed with each * anomaly detection job. The datafeed contains a query that runs at a defined * interval (frequency). If you are concerned about delayed data, - * you can add a delay (query_delay) at each interval. When - * Elasticsearch security features are enabled, your datafeed remembers which - * roles the user who created it had at the time of creation and runs the query - * using those same roles. If you provide secondary authorization headers, those - * credentials are used instead. You must use Kibana, this API, or the create - * anomaly detection jobs API to create a datafeed. Do not add a datafeed - * directly to the .ml-config index. Do not give users - * write privileges on the .ml-config index. + * you can add a delay + * (query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-configindex. Do not give userswriteprivileges on the.ml-config` + * index. 
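To make the datafeed behaviour described above concrete, a datafeed with an explicit query_delay might be created through the Java client roughly as follows. This is a sketch, not part of the change: the ids, index pattern, frequency, and delay are placeholders, and client is an existing ElasticsearchClient.

// Sketch only; all ids and index names are placeholders.
PutDatafeedResponse created = client.ml().putDatafeed(d -> d
    .datafeedId("datafeed-sample")
    .jobId("sample-anomaly-job")        // the anomaly detection job the datafeed feeds
    .indices("sample-logs-*")           // source indices for the query
    .query(q -> q.matchAll(m -> m))     // query that runs at each interval
    .frequency(f -> f.time("150s"))     // the interval (frequency)
    .queryDelay(t -> t.time("120s"))    // delay to tolerate late-arriving data
);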
* * @param fn * a function that initializes a builder to create the @@ -3256,7 +3246,7 @@ public CompletableFuture validate() { // ----- Endpoint: ml.validate_detector /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @see Documentation @@ -3271,7 +3261,7 @@ public CompletableFuture validateDetector(ValidateDete } /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @param fn * a function that initializes a builder to create the @@ -3287,7 +3277,7 @@ public final CompletableFuture validateDetector( } /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java index b96659890..347da76d1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java @@ -1883,7 +1883,7 @@ public final InferTrainedModelResponse inferTrainedModel( // ----- Endpoint: ml.info /** - * Get machine learning information. Get defaults and limits used by machine + * Return ML defaults and limits. Returns defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be @@ -2296,14 +2296,9 @@ public final PutDataFrameAnalyticsResponse putDataFrameAnalytics( * an anomaly detection job. You can associate only one datafeed with each * anomaly detection job. The datafeed contains a query that runs at a defined * interval (frequency). If you are concerned about delayed data, - * you can add a delay (query_delay) at each interval. When - * Elasticsearch security features are enabled, your datafeed remembers which - * roles the user who created it had at the time of creation and runs the query - * using those same roles. If you provide secondary authorization headers, those - * credentials are used instead. You must use Kibana, this API, or the create - * anomaly detection jobs API to create a datafeed. Do not add a datafeed - * directly to the .ml-config index. Do not give users - * write privileges on the .ml-config index. + * you can add a delay + * (query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-configindex. Do not give userswriteprivileges on the.ml-config` + * index. * * @see Documentation @@ -2322,14 +2317,9 @@ public PutDatafeedResponse putDatafeed(PutDatafeedRequest request) throws IOExce * an anomaly detection job. You can associate only one datafeed with each * anomaly detection job. The datafeed contains a query that runs at a defined * interval (frequency). If you are concerned about delayed data, - * you can add a delay (query_delay) at each interval. 
When - * Elasticsearch security features are enabled, your datafeed remembers which - * roles the user who created it had at the time of creation and runs the query - * using those same roles. If you provide secondary authorization headers, those - * credentials are used instead. You must use Kibana, this API, or the create - * anomaly detection jobs API to create a datafeed. Do not add a datafeed - * directly to the .ml-config index. Do not give users - * write privileges on the .ml-config index. + * you can add a delay + * (query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-configindex. Do not give userswriteprivileges on the.ml-config` + * index. * * @param fn * a function that initializes a builder to create the @@ -3343,7 +3333,7 @@ public ValidateResponse validate() throws IOException, ElasticsearchException { // ----- Endpoint: ml.validate_detector /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @see Documentation @@ -3359,7 +3349,7 @@ public ValidateDetectorResponse validateDetector(ValidateDetectorRequest request } /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @param fn * a function that initializes a builder to create the @@ -3376,7 +3366,7 @@ public final ValidateDetectorResponse validateDetector( } /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java index ebc509a2e..eb99d7a47 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/MlInfoRequest.java @@ -50,7 +50,7 @@ // typedef: ml.info.Request /** - * Get machine learning information. Get defaults and limits used by machine + * Return ML defaults and limits. Returns defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java index 959991750..7afc7468d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java @@ -72,14 +72,9 @@ * an anomaly detection job. You can associate only one datafeed with each * anomaly detection job. The datafeed contains a query that runs at a defined * interval (frequency). If you are concerned about delayed data, - * you can add a delay (query_delay) at each interval. When - * Elasticsearch security features are enabled, your datafeed remembers which - * roles the user who created it had at the time of creation and runs the query - * using those same roles. 
If you provide secondary authorization headers, those - * credentials are used instead. You must use Kibana, this API, or the create - * anomaly detection jobs API to create a datafeed. Do not add a datafeed - * directly to the .ml-config index. Do not give users - * write privileges on the .ml-config index. + * you can add a delay + * (query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-configindex. Do not give userswriteprivileges on the.ml-config` + * index. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java index c7d84909c..bb3e5f3b5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ValidateDetectorRequest.java @@ -56,7 +56,7 @@ // typedef: ml.validate_detector.Request /** - * Validate an anomaly detection job. + * Validates an anomaly detection detector. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java index dbb680041..768360106 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java @@ -62,8 +62,7 @@ // typedef: monitoring.bulk.Request /** - * Send monitoring data. This API is used by the monitoring features to send - * monitoring data. + * Used by the monitoring features to send monitoring data. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java index 9dc19d082..aad05d73a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java @@ -70,8 +70,7 @@ public ElasticsearchMonitoringAsyncClient withTransportOptions(@Nullable Transpo // ----- Endpoint: monitoring.bulk /** - * Send monitoring data. This API is used by the monitoring features to send - * monitoring data. + * Used by the monitoring features to send monitoring data. * * @see Documentation @@ -86,8 +85,7 @@ public CompletableFuture bulk(BulkRequest request) { } /** - * Send monitoring data. This API is used by the monitoring features to send - * monitoring data. + * Used by the monitoring features to send monitoring data. 
* * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java index 1ad60de63..0c0fb8752 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java @@ -69,8 +69,7 @@ public ElasticsearchMonitoringClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: monitoring.bulk /** - * Send monitoring data. This API is used by the monitoring features to send - * monitoring data. + * Used by the monitoring features to send monitoring data. * * @see Documentation @@ -85,8 +84,7 @@ public BulkResponse bulk(BulkRequest request) throws IOException, ElasticsearchE } /** - * Send monitoring data. This API is used by the monitoring features to send - * monitoring data. + * Used by the monitoring features to send monitoring data. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java index bf077086d..8abae242c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java @@ -56,36 +56,8 @@ // typedef: rollup.delete_job.Request /** - * Delete a rollup job. - *

- * A job must be stopped before it can be deleted. If you attempt to delete a - * started job, an error occurs. Similarly, if you attempt to delete a - * nonexistent job, an exception occurs. - *

- * IMPORTANT: When you delete a job, you remove only the process that is - * actively monitoring and rolling up data. The API does not delete any - * previously rolled up data. This is by design; a user may wish to roll up a - * static data set. Because the data set is static, after it has been fully - * rolled up there is no need to keep the indexing rollup job around (as there - * will be no new data). Thus the job can be deleted, leaving behind the rolled - * up data for analysis. If you wish to also remove the rollup data and the - * rollup index contains the data for only a single job, you can delete the - * whole rollup index. If the rollup index stores data from several jobs, you - * must issue a delete-by-query that targets the rollup job's identifier in the - * rollup index. For example: + * Deletes an existing rollup job. * - *

- * POST my_rollup_index/_delete_by_query
- * {
- *   "query": {
- *     "term": {
- *       "_rollup.id": "the_rollup_job_id"
- *     }
- *   }
- * }
- * 
- * 
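For reference, a Java-client counterpart of the delete-by-query shown above might look like the sketch below; the index name and job id are the same placeholders used in the example, and client is an existing ElasticsearchClient.

// Sketch only; removes the rolled-up documents written by a single job.
client.deleteByQuery(d -> d
    .index("my_rollup_index")
    .query(q -> q.term(t -> t
        .field("_rollup.id")
        .value("the_rollup_job_id")))
);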
- * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java index 1c5b12922..f564b55ba 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java @@ -70,36 +70,8 @@ public ElasticsearchRollupAsyncClient withTransportOptions(@Nullable TransportOp // ----- Endpoint: rollup.delete_job /** - * Delete a rollup job. - *

- * A job must be stopped before it can be deleted. If you attempt to delete a - * started job, an error occurs. Similarly, if you attempt to delete a - * nonexistent job, an exception occurs. - *

- * IMPORTANT: When you delete a job, you remove only the process that is - * actively monitoring and rolling up data. The API does not delete any - * previously rolled up data. This is by design; a user may wish to roll up a - * static data set. Because the data set is static, after it has been fully - * rolled up there is no need to keep the indexing rollup job around (as there - * will be no new data). Thus the job can be deleted, leaving behind the rolled - * up data for analysis. If you wish to also remove the rollup data and the - * rollup index contains the data for only a single job, you can delete the - * whole rollup index. If the rollup index stores data from several jobs, you - * must issue a delete-by-query that targets the rollup job's identifier in the - * rollup index. For example: + * Deletes an existing rollup job. * - *

-	 * POST my_rollup_index/_delete_by_query
-	 * {
-	 *   "query": {
-	 *     "term": {
-	 *       "_rollup.id": "the_rollup_job_id"
-	 *     }
-	 *   }
-	 * }
-	 * 
-	 * 
- * * @see Documentation * on elastic.co @@ -113,36 +85,8 @@ public CompletableFuture deleteJob(DeleteJobRequest request) } /** - * Delete a rollup job. - *

- * A job must be stopped before it can be deleted. If you attempt to delete a - * started job, an error occurs. Similarly, if you attempt to delete a - * nonexistent job, an exception occurs. - *

- * IMPORTANT: When you delete a job, you remove only the process that is - * actively monitoring and rolling up data. The API does not delete any - * previously rolled up data. This is by design; a user may wish to roll up a - * static data set. Because the data set is static, after it has been fully - * rolled up there is no need to keep the indexing rollup job around (as there - * will be no new data). Thus the job can be deleted, leaving behind the rolled - * up data for analysis. If you wish to also remove the rollup data and the - * rollup index contains the data for only a single job, you can delete the - * whole rollup index. If the rollup index stores data from several jobs, you - * must issue a delete-by-query that targets the rollup job's identifier in the - * rollup index. For example: + * Deletes an existing rollup job. * - *

-	 * POST my_rollup_index/_delete_by_query
-	 * {
-	 *   "query": {
-	 *     "term": {
-	 *       "_rollup.id": "the_rollup_job_id"
-	 *     }
-	 *   }
-	 * }
-	 * 
-	 * 
- * * @param fn * a function that initializes a builder to create the * {@link DeleteJobRequest} @@ -159,13 +103,7 @@ public final CompletableFuture deleteJob( // ----- Endpoint: rollup.get_jobs /** - * Get rollup job information. Get the configuration, stats, and status of - * rollup jobs. - *

- * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @see Documentation @@ -180,13 +118,7 @@ public CompletableFuture getJobs(GetJobsRequest request) { } /** - * Get rollup job information. Get the configuration, stats, and status of - * rollup jobs. - *

- * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @param fn * a function that initializes a builder to create the @@ -202,13 +134,7 @@ public final CompletableFuture getJobs( } /** - * Get rollup job information. Get the configuration, stats, and status of - * rollup jobs. - *

- * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @see Documentation @@ -223,21 +149,9 @@ public CompletableFuture getJobs() { // ----- Endpoint: rollup.get_rollup_caps /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @see Documentation * on elastic.co @@ -251,21 +165,9 @@ public CompletableFuture getRollupCaps(GetRollupCapsReque } /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @param fn * a function that initializes a builder to create the * {@link GetRollupCapsRequest} @@ -280,21 +182,9 @@ public final CompletableFuture getRollupCaps( } /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @see Documentation * on elastic.co @@ -308,17 +198,9 @@ public CompletableFuture getRollupCaps() { // ----- Endpoint: rollup.get_rollup_index_caps /** - * Get the rollup index capabilities. Get the rollup capabilities of all jobs - * inside of a rollup index. A single rollup index may store the data for - * multiple rollup jobs and may have a variety of capabilities depending on - * those jobs. This API enables you to determine: - *
    - *
- * • What jobs are stored in an index (or indices specified via a pattern)?
- * • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
- * + * Returns the rollup capabilities of all jobs inside of a rollup index (for + * example, the index where rollup data is stored). + * * @see Documentation * on elastic.co @@ -332,17 +214,9 @@ public CompletableFuture getRollupIndexCaps(GetRollu } /** - * Get the rollup index capabilities. Get the rollup capabilities of all jobs - * inside of a rollup index. A single rollup index may store the data for - * multiple rollup jobs and may have a variety of capabilities depending on - * those jobs. This API enables you to determine: - *
    - *
- * • What jobs are stored in an index (or indices specified via a pattern)?
- * • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
- * + * Returns the rollup capabilities of all jobs inside of a rollup index (for + * example, the index where rollup data is stored). + * * @param fn * a function that initializes a builder to create the * {@link GetRollupIndexCapsRequest} @@ -359,23 +233,7 @@ public final CompletableFuture getRollupIndexCaps( // ----- Endpoint: rollup.put_job /** - * Create a rollup job. - *

- * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will - * fail with a message about the deprecation and planned removal of rollup - * features. A cluster needs to contain either a rollup job or a rollup index in - * order for this API to be allowed to run. - *

- * The rollup job configuration contains all the details about how the job - * should run, when it indexes documents, and what future queries will be able - * to run against the rollup index. - *

- * There are three main sections to the job configuration: the logistical - * details about the job (for example, the cron schedule), the fields that are - * used for grouping, and what metrics to collect for each group. - *
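To make those three sections concrete, a hypothetical job configuration submitted through the Java client could look like the sketch below; the job id, index names, cron expression, and field choices are illustrative assumptions.

// Sketch only; "client" is an existing ElasticsearchClient, all names are placeholders.
client.rollup().putJob(j -> j
    .id("sensor-rollup")
    .indexPattern("sensor-*")                // source indices to roll up
    .rollupIndex("sensor_rollup")            // index that stores the rolled-up documents
    .cron("*/30 * * * * ?")                  // logistical details: when the job runs
    .pageSize(1000)
    .groups(g -> g                           // fields used for grouping
        .dateHistogram(dh -> dh.field("timestamp").fixedInterval(fi -> fi.time("1h")))
        .terms(t -> t.fields("node")))
    .metrics(m -> m                          // metrics collected for each group
        .field("temperature")
        .metrics(Metric.Min, Metric.Max, Metric.Avg))
);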

- * Jobs are created in a STOPPED state. You can start them with the - * start rollup jobs API. + * Creates a rollup job. * * @see Documentation @@ -390,23 +248,7 @@ public CompletableFuture putJob(PutJobRequest request) { } /** - * Create a rollup job. - *

- * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will - * fail with a message about the deprecation and planned removal of rollup - * features. A cluster needs to contain either a rollup job or a rollup index in - * order for this API to be allowed to run. - *

- * The rollup job configuration contains all the details about how the job - * should run, when it indexes documents, and what future queries will be able - * to run against the rollup index. - *

- * There are three main sections to the job configuration: the logistical - * details about the job (for example, the cron schedule), the fields that are - * used for grouping, and what metrics to collect for each group. - *

- * Jobs are created in a STOPPED state. You can start them with the - * start rollup jobs API. + * Creates a rollup job. * * @param fn * a function that initializes a builder to create the @@ -424,11 +266,7 @@ public final CompletableFuture putJob( // ----- Endpoint: rollup.rollup_search /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @see Documentation @@ -447,11 +285,7 @@ public CompletableFuture> rollupSear } /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @param fn * a function that initializes a builder to create the @@ -468,11 +302,7 @@ public final CompletableFuture> roll } /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @see Documentation @@ -491,11 +321,7 @@ public CompletableFuture> rollupSear } /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @param fn * a function that initializes a builder to create the @@ -513,9 +339,7 @@ public final CompletableFuture> roll // ----- Endpoint: rollup.start_job /** - * Start rollup jobs. If you try to start a job that does not exist, an - * exception occurs. If you try to start a job that is already started, nothing - * happens. + * Starts an existing, stopped rollup job. * * @see Documentation @@ -530,9 +354,7 @@ public CompletableFuture startJob(StartJobRequest request) { } /** - * Start rollup jobs. If you try to start a job that does not exist, an - * exception occurs. If you try to start a job that is already started, nothing - * happens. + * Starts an existing, stopped rollup job. * * @param fn * a function that initializes a builder to create the @@ -550,8 +372,7 @@ public final CompletableFuture startJob( // ----- Endpoint: rollup.stop_job /** - * Stop rollup jobs. If you try to stop a job that does not exist, an exception - * occurs. If you try to stop a job that is already stopped, nothing happens. + * Stops an existing, started rollup job. * * @see Documentation @@ -566,8 +387,7 @@ public CompletableFuture stopJob(StopJobRequest request) { } /** - * Stop rollup jobs. 
If you try to stop a job that does not exist, an exception - * occurs. If you try to stop a job that is already stopped, nothing happens. + * Stops an existing, started rollup job. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java index 021e3f572..f8b4584cc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java @@ -70,36 +70,8 @@ public ElasticsearchRollupClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: rollup.delete_job /** - * Delete a rollup job. - *

- * A job must be stopped before it can be deleted. If you attempt to delete a - * started job, an error occurs. Similarly, if you attempt to delete a - * nonexistent job, an exception occurs. - *

- * IMPORTANT: When you delete a job, you remove only the process that is - * actively monitoring and rolling up data. The API does not delete any - * previously rolled up data. This is by design; a user may wish to roll up a - * static data set. Because the data set is static, after it has been fully - * rolled up there is no need to keep the indexing rollup job around (as there - * will be no new data). Thus the job can be deleted, leaving behind the rolled - * up data for analysis. If you wish to also remove the rollup data and the - * rollup index contains the data for only a single job, you can delete the - * whole rollup index. If the rollup index stores data from several jobs, you - * must issue a delete-by-query that targets the rollup job's identifier in the - * rollup index. For example: + * Deletes an existing rollup job. * - *

-	 * POST my_rollup_index/_delete_by_query
-	 * {
-	 *   "query": {
-	 *     "term": {
-	 *       "_rollup.id": "the_rollup_job_id"
-	 *     }
-	 *   }
-	 * }
-	 * 
-	 * 
- * * @see Documentation * on elastic.co @@ -113,36 +85,8 @@ public DeleteJobResponse deleteJob(DeleteJobRequest request) throws IOException, } /** - * Delete a rollup job. - *

- * A job must be stopped before it can be deleted. If you attempt to delete a - * started job, an error occurs. Similarly, if you attempt to delete a - * nonexistent job, an exception occurs. - *

- * IMPORTANT: When you delete a job, you remove only the process that is - * actively monitoring and rolling up data. The API does not delete any - * previously rolled up data. This is by design; a user may wish to roll up a - * static data set. Because the data set is static, after it has been fully - * rolled up there is no need to keep the indexing rollup job around (as there - * will be no new data). Thus the job can be deleted, leaving behind the rolled - * up data for analysis. If you wish to also remove the rollup data and the - * rollup index contains the data for only a single job, you can delete the - * whole rollup index. If the rollup index stores data from several jobs, you - * must issue a delete-by-query that targets the rollup job's identifier in the - * rollup index. For example: + * Deletes an existing rollup job. * - *

-	 * POST my_rollup_index/_delete_by_query
-	 * {
-	 *   "query": {
-	 *     "term": {
-	 *       "_rollup.id": "the_rollup_job_id"
-	 *     }
-	 *   }
-	 * }
-	 * 
-	 * 
- * * @param fn * a function that initializes a builder to create the * {@link DeleteJobRequest} @@ -159,13 +103,7 @@ public final DeleteJobResponse deleteJob(Function - * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @see Documentation @@ -180,13 +118,7 @@ public GetJobsResponse getJobs(GetJobsRequest request) throws IOException, Elast } /** - * Get rollup job information. Get the configuration, stats, and status of - * rollup jobs. - *

- * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @param fn * a function that initializes a builder to create the @@ -202,13 +134,7 @@ public final GetJobsResponse getJobs(Function - * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @see Documentation @@ -223,21 +149,9 @@ public GetJobsResponse getJobs() throws IOException, ElasticsearchException { // ----- Endpoint: rollup.get_rollup_caps /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @see Documentation * on elastic.co @@ -252,21 +166,9 @@ public GetRollupCapsResponse getRollupCaps(GetRollupCapsRequest request) } /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @param fn * a function that initializes a builder to create the * {@link GetRollupCapsRequest} @@ -282,21 +184,9 @@ public final GetRollupCapsResponse getRollupCaps( } /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @see Documentation * on elastic.co @@ -310,17 +200,9 @@ public GetRollupCapsResponse getRollupCaps() throws IOException, ElasticsearchEx // ----- Endpoint: rollup.get_rollup_index_caps /** - * Get the rollup index capabilities. Get the rollup capabilities of all jobs - * inside of a rollup index. A single rollup index may store the data for - * multiple rollup jobs and may have a variety of capabilities depending on - * those jobs. This API enables you to determine: - *
    - *
- * • What jobs are stored in an index (or indices specified via a pattern)?
- * • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
- * + * Returns the rollup capabilities of all jobs inside of a rollup index (for + * example, the index where rollup data is stored). + * * @see Documentation * on elastic.co @@ -335,17 +217,9 @@ public GetRollupIndexCapsResponse getRollupIndexCaps(GetRollupIndexCapsRequest r } /** - * Get the rollup index capabilities. Get the rollup capabilities of all jobs - * inside of a rollup index. A single rollup index may store the data for - * multiple rollup jobs and may have a variety of capabilities depending on - * those jobs. This API enables you to determine: - *
    - *
- * • What jobs are stored in an index (or indices specified via a pattern)?
- * • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
- * + * Returns the rollup capabilities of all jobs inside of a rollup index (for + * example, the index where rollup data is stored). + * * @param fn * a function that initializes a builder to create the * {@link GetRollupIndexCapsRequest} @@ -363,23 +237,7 @@ public final GetRollupIndexCapsResponse getRollupIndexCaps( // ----- Endpoint: rollup.put_job /** - * Create a rollup job. - *

- * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will - * fail with a message about the deprecation and planned removal of rollup - * features. A cluster needs to contain either a rollup job or a rollup index in - * order for this API to be allowed to run. - *

- * The rollup job configuration contains all the details about how the job - * should run, when it indexes documents, and what future queries will be able - * to run against the rollup index. - *

- * There are three main sections to the job configuration: the logistical - * details about the job (for example, the cron schedule), the fields that are - * used for grouping, and what metrics to collect for each group. - *

- * Jobs are created in a STOPPED state. You can start them with the - * start rollup jobs API. + * Creates a rollup job. * * @see Documentation @@ -394,23 +252,7 @@ public PutJobResponse putJob(PutJobRequest request) throws IOException, Elastics } /** - * Create a rollup job. - *

- * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will - * fail with a message about the deprecation and planned removal of rollup - * features. A cluster needs to contain either a rollup job or a rollup index in - * order for this API to be allowed to run. - *

- * The rollup job configuration contains all the details about how the job - * should run, when it indexes documents, and what future queries will be able - * to run against the rollup index. - *

- * There are three main sections to the job configuration: the logistical - * details about the job (for example, the cron schedule), the fields that are - * used for grouping, and what metrics to collect for each group. - *

- * Jobs are created in a STOPPED state. You can start them with the - * start rollup jobs API. + * Creates a rollup job. * * @param fn * a function that initializes a builder to create the @@ -428,11 +270,7 @@ public final PutJobResponse putJob(FunctionDocumentation @@ -451,11 +289,7 @@ public RollupSearchResponse rollupSearch(RollupSearchRequ } /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @param fn * a function that initializes a builder to create the @@ -472,11 +306,7 @@ public final RollupSearchResponse rollupSearch( } /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @see Documentation @@ -495,11 +325,7 @@ public RollupSearchResponse rollupSearch(RollupSearchRequ } /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @param fn * a function that initializes a builder to create the @@ -518,9 +344,7 @@ public final RollupSearchResponse rollupSearch( // ----- Endpoint: rollup.start_job /** - * Start rollup jobs. If you try to start a job that does not exist, an - * exception occurs. If you try to start a job that is already started, nothing - * happens. + * Starts an existing, stopped rollup job. * * @see Documentation @@ -535,9 +359,7 @@ public StartJobResponse startJob(StartJobRequest request) throws IOException, El } /** - * Start rollup jobs. If you try to start a job that does not exist, an - * exception occurs. If you try to start a job that is already started, nothing - * happens. + * Starts an existing, stopped rollup job. * * @param fn * a function that initializes a builder to create the @@ -555,8 +377,7 @@ public final StartJobResponse startJob(FunctionDocumentation @@ -571,8 +392,7 @@ public StopJobResponse stopJob(StopJobRequest request) throws IOException, Elast } /** - * Stop rollup jobs. If you try to stop a job that does not exist, an exception - * occurs. If you try to stop a job that is already stopped, nothing happens. + * Stops an existing, started rollup job. 
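Tying the remaining rollup endpoints together, a hypothetical sequence that starts a job, queries the rolled-up data with standard Query DSL, and stops the job again is sketched below; the ids, index, field, and aggregation names are placeholders, and client is an existing ElasticsearchClient.

// Sketch only; names are placeholders.
client.rollup().startJob(s -> s.id("sensor-rollup"));

// Standard Query DSL; the rollup search endpoint rewrites it against the rollup documents.
RollupSearchResponse<Void> response = client.rollup().rollupSearch(r -> r
        .index("sensor_rollup")
        .size(0)
        .query(q -> q.matchAll(m -> m))
        .aggregations("max_temperature", a -> a.max(mx -> mx.field("temperature"))),
    Void.class);
System.out.println(response.aggregations().get("max_temperature").max().value());

client.rollup().stopJob(s -> s.id("sensor-rollup"));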
* * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java index 79ed80012..29f9fe583 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java @@ -55,13 +55,7 @@ // typedef: rollup.get_jobs.Request /** - * Get rollup job information. Get the configuration, stats, and status of - * rollup jobs. - *

- * NOTE: This API returns only active (both STARTED and - * STOPPED) jobs. If a job was created, ran for a while, then was - * deleted, the API does not return any details about it. For details about a - * historical rollup job, the rollup capabilities API may be more useful. + * Retrieves the configuration, stats, and status of rollup jobs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java index 43f2c701d..7f1a32ac5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java @@ -55,21 +55,9 @@ // typedef: rollup.get_rollup_caps.Request /** - * Get the rollup job capabilities. Get the capabilities of any rollup jobs that - * have been configured for a specific index or index pattern. - *

- * This API is useful because a rollup job is often configured to rollup only a - * subset of fields from the source index. Furthermore, only certain - * aggregations can be configured for various fields, leading to a limited - * subset of functionality depending on that configuration. This API enables you - * to inspect an index and determine: - *

    - *
- * 1. Does this index have associated rollup data somewhere in the cluster?
- * 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
- * + * Returns the capabilities of any rollup jobs that have been configured for a + * specific index or index pattern. + * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java index 72aef2df2..d4f711672 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java @@ -58,17 +58,9 @@ // typedef: rollup.get_rollup_index_caps.Request /** - * Get the rollup index capabilities. Get the rollup capabilities of all jobs - * inside of a rollup index. A single rollup index may store the data for - * multiple rollup jobs and may have a variety of capabilities depending on - * those jobs. This API enables you to determine: - *
    - *
- * • What jobs are stored in an index (or indices specified via a pattern)?
- * • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
- * + * Returns the rollup capabilities of all jobs inside of a rollup index (for + * example, the index where rollup data is stored). + * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java index 5e059d002..f556d0ce9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java @@ -61,23 +61,7 @@ // typedef: rollup.put_job.Request /** - * Create a rollup job. - *

- * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will - * fail with a message about the deprecation and planned removal of rollup - * features. A cluster needs to contain either a rollup job or a rollup index in - * order for this API to be allowed to run. - *

- * The rollup job configuration contains all the details about how the job - * should run, when it indexes documents, and what future queries will be able - * to run against the rollup index. - *

- * There are three main sections to the job configuration: the logistical - * details about the job (for example, the cron schedule), the fields that are - * used for grouping, and what metrics to collect for each group. - *

- * Jobs are created in a STOPPED state. You can start them with the - * start rollup jobs API. + * Creates a rollup job. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java index 0d9cc55f8..0eab9af1c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java @@ -63,11 +63,7 @@ // typedef: rollup.rollup_search.Request /** - * Search rolled-up data. The rollup search endpoint is needed because, - * internally, rolled-up documents utilize a different document structure than - * the original data. It rewrites standard Query DSL into a format that matches - * the rollup documents then takes the response and rewrites it back to what a - * client would expect given the original query. + * Enables searching rolled-up data using the standard Query DSL. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java index 4d44c46ff..dc63e9189 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java @@ -56,9 +56,7 @@ // typedef: rollup.start_job.Request /** - * Start rollup jobs. If you try to start a job that does not exist, an - * exception occurs. If you try to start a job that is already started, nothing - * happens. + * Starts an existing, stopped rollup job. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java index 8df95af37..08eb7d861 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java @@ -57,8 +57,7 @@ // typedef: rollup.stop_job.Request /** - * Stop rollup jobs. If you try to stop a job that does not exist, an exception - * occurs. If you try to stop a job that is already stopped, nothing happens. + * Stops an existing, started rollup job. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListResponse.java index 80c24551d..26a914f44 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/ListResponse.java @@ -19,6 +19,7 @@ package co.elastic.clients.elasticsearch.search_application; +import co.elastic.clients.elasticsearch.search_application.list.SearchApplicationListItem; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -63,7 +64,7 @@ public class ListResponse implements JsonpSerializable { private final long count; - private final List results; + private final List results; // --------------------------------------------------------------------------------------------- @@ -88,7 +89,7 @@ public final long count() { /** * Required - API name: {@code results} */ - public final List results() { + public final List results() { return this.results; } @@ -109,7 +110,7 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { if (ApiTypeHelper.isDefined(this.results)) { generator.writeKey("results"); generator.writeStartArray(); - for (SearchApplication item0 : this.results) { + for (SearchApplicationListItem item0 : this.results) { item0.serialize(generator, mapper); } @@ -133,7 +134,7 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { private Long count; - private List results; + private List results; /** * Required - API name: {@code count} @@ -148,7 +149,7 @@ public final Builder count(long value) { *

* Adds all elements of list to results. */ - public final Builder results(List list) { + public final Builder results(List list) { this.results = _listAddAll(this.results, list); return this; } @@ -158,7 +159,7 @@ public final Builder results(List list) { *

* Adds one or more values to results. */ - public final Builder results(SearchApplication value, SearchApplication... values) { + public final Builder results(SearchApplicationListItem value, SearchApplicationListItem... values) { this.results = _listAdd(this.results, value, values); return this; } @@ -168,8 +169,9 @@ public final Builder results(SearchApplication value, SearchApplication... value *

* Adds a value to results using a builder lambda. */ - public final Builder results(Function> fn) { - return results(fn.apply(new SearchApplication.Builder()).build()); + public final Builder results( + Function> fn) { + return results(fn.apply(new SearchApplicationListItem.Builder()).build()); } @Override @@ -201,7 +203,8 @@ public ListResponse build() { protected static void setupListResponseDeserializer(ObjectDeserializer op) { op.add(Builder::count, JsonpDeserializer.longDeserializer(), "count"); - op.add(Builder::results, JsonpDeserializer.arrayDeserializer(SearchApplication._DESERIALIZER), "results"); + op.add(Builder::results, JsonpDeserializer.arrayDeserializer(SearchApplicationListItem._DESERIALIZER), + "results"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PutRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PutRequest.java index 100047271..cec4f56f1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PutRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/PutRequest.java @@ -71,7 +71,7 @@ public class PutRequest extends RequestBase implements JsonpSerializable { private final String name; - private final SearchApplicationParameters searchApplication; + private final SearchApplication searchApplication; // --------------------------------------------------------------------------------------------- @@ -110,7 +110,7 @@ public final String name() { /** * Required - Request body. */ - public final SearchApplicationParameters searchApplication() { + public final SearchApplication searchApplication() { return this.searchApplication; } @@ -134,7 +134,7 @@ public static class Builder extends RequestBase.AbstractBuilder impleme private String name; - private SearchApplicationParameters searchApplication; + private SearchApplication searchApplication; /** * If true, this request cannot replace or update existing Search @@ -160,7 +160,7 @@ public final Builder name(String value) { /** * Required - Request body. */ - public final Builder searchApplication(SearchApplicationParameters value) { + public final Builder searchApplication(SearchApplication value) { this.searchApplication = value; return this; } @@ -169,16 +169,15 @@ public final Builder searchApplication(SearchApplicationParameters value) { * Required - Request body. 
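With the list-response hunks above, results are typed as SearchApplicationListItem instead of SearchApplication. A minimal sketch of reading them, assuming an already-configured ElasticsearchClient:

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.search_application.ListResponse;
import co.elastic.clients.elasticsearch.search_application.list.SearchApplicationListItem;

class ListSearchApplicationsSketch {
    void printApplications(ElasticsearchClient client) throws Exception {
        ListResponse response = client.searchApplication().list(l -> l.size(25));
        for (SearchApplicationListItem item : response.results()) {
            // name, indices and updated_at_millis are required fields on each item.
            System.out.println(item.name() + " -> " + item.indices());
        }
    }
}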
*/ public final Builder searchApplication( - Function> fn) { - return this.searchApplication(fn.apply(new SearchApplicationParameters.Builder()).build()); + Function> fn) { + return this.searchApplication(fn.apply(new SearchApplication.Builder()).build()); } @Override public Builder withJson(JsonParser parser, JsonpMapper mapper) { @SuppressWarnings("unchecked") - SearchApplicationParameters value = (SearchApplicationParameters) SearchApplicationParameters._DESERIALIZER - .deserialize(parser, mapper); + SearchApplication value = (SearchApplication) SearchApplication._DESERIALIZER.deserialize(parser, mapper); return this.searchApplication(value); } @@ -203,7 +202,7 @@ public PutRequest build() { public static final JsonpDeserializer _DESERIALIZER = createPutRequestDeserializer(); protected static JsonpDeserializer createPutRequestDeserializer() { - JsonpDeserializer valueDeserializer = SearchApplicationParameters._DESERIALIZER; + JsonpDeserializer valueDeserializer = SearchApplication._DESERIALIZER; return JsonpDeserializer.of(valueDeserializer.acceptedEvents(), (parser, mapper, event) -> new Builder() .searchApplication(valueDeserializer.deserialize(parser, mapper, event)).build()); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplication.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplication.java index 2ad9829b0..0e34c6ab7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplication.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplication.java @@ -22,6 +22,8 @@ import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; import co.elastic.clients.util.ApiTypeHelper; @@ -30,6 +32,7 @@ import jakarta.json.stream.JsonGenerator; import java.lang.Long; import java.lang.String; +import java.util.List; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -58,18 +61,28 @@ * specification */ @JsonpDeserializable -public class SearchApplication extends SearchApplicationParameters { +public class SearchApplication implements JsonpSerializable { private final String name; + private final List indices; + private final long updatedAtMillis; + @Nullable + private final String analyticsCollectionName; + + @Nullable + private final SearchApplicationTemplate template; + // --------------------------------------------------------------------------------------------- protected SearchApplication(AbstractBuilder builder) { - super(builder); this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); + this.indices = ApiTypeHelper.unmodifiableRequired(builder.indices, this, "indices"); this.updatedAtMillis = ApiTypeHelper.requireNonNull(builder.updatedAtMillis, this, "updatedAtMillis"); + this.analyticsCollectionName = builder.analyticsCollectionName; + this.template = builder.template; } @@ -78,7 +91,7 @@ public static SearchApplication searchApplicationOf(Function * API name: {@code name} */ @@ -86,6 +99,15 @@ public final String name() { return this.name; } + /** + * Required - Indices that are part of the Search Application. + *

+ * API name: {@code indices} + */ + public final List<String> indices() { + return this.indices; + } + /** * Required - Last time the Search Application was updated. *

@@ -95,15 +117,69 @@ public final long updatedAtMillis() { return this.updatedAtMillis; } + /** + * Analytics collection associated to the Search Application. + *

+ * API name: {@code analytics_collection_name} + */ + @Nullable + public final String analyticsCollectionName() { + return this.analyticsCollectionName; + } + + /** + * Search template to use on search operations. + *

+ * API name: {@code template} + */ + @Nullable + public final SearchApplicationTemplate template() { + return this.template; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - super.serializeInternal(generator, mapper); generator.writeKey("name"); generator.write(this.name); + if (ApiTypeHelper.isDefined(this.indices)) { + generator.writeKey("indices"); + generator.writeStartArray(); + for (String item0 : this.indices) { + generator.write(item0); + + } + generator.writeEnd(); + + } generator.writeKey("updated_at_millis"); generator.write(this.updatedAtMillis); + if (this.analyticsCollectionName != null) { + generator.writeKey("analytics_collection_name"); + generator.write(this.analyticsCollectionName); + + } + if (this.template != null) { + generator.writeKey("template"); + this.template.serialize(generator, mapper); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); } // --------------------------------------------------------------------------------------------- @@ -135,13 +211,21 @@ public SearchApplication build() { public abstract static class AbstractBuilder> extends - SearchApplicationParameters.AbstractBuilder { + WithJsonObjectBuilderBase { private String name; + private List indices; + private Long updatedAtMillis; + @Nullable + private String analyticsCollectionName; + + @Nullable + private SearchApplicationTemplate template; + /** - * Required - Search Application name + * Required - Search Application name. *

* API name: {@code name} */ @@ -150,6 +234,30 @@ public final BuilderT name(String value) { return self(); } + /** + * Required - Indices that are part of the Search Application. + *

+ * API name: {@code indices} + *

+ * Adds all elements of <code>list</code> to <code>indices</code>. + */ + public final BuilderT indices(List<String> list) { + this.indices = _listAddAll(this.indices, list); + return self(); + } + + /** + * Required - Indices that are part of the Search Application. + *

+ * API name: {@code indices} + *

+ * Adds one or more values to indices. + */ + public final BuilderT indices(String value, String... values) { + this.indices = _listAdd(this.indices, value, values); + return self(); + } + /** * Required - Last time the Search Application was updated. *

@@ -160,6 +268,38 @@ public final BuilderT updatedAtMillis(long value) { return self(); } + /** + * Analytics collection associated to the Search Application. + *

+ * API name: {@code analytics_collection_name} + */ + public final BuilderT analyticsCollectionName(@Nullable String value) { + this.analyticsCollectionName = value; + return self(); + } + + /** + * Search template to use on search operations. + *

+ * API name: {@code template} + */ + public final BuilderT template(@Nullable SearchApplicationTemplate value) { + this.template = value; + return self(); + } + + /** + * Search template to use on search operations. + *

+ * API name: {@code template} + */ + public final BuilderT template( + Function> fn) { + return this.template(fn.apply(new SearchApplicationTemplate.Builder()).build()); + } + + protected abstract BuilderT self(); + } // --------------------------------------------------------------------------------------------- @@ -172,9 +312,14 @@ public final BuilderT updatedAtMillis(long value) { protected static > void setupSearchApplicationDeserializer( ObjectDeserializer op) { - SearchApplicationParameters.setupSearchApplicationParametersDeserializer(op); + op.add(AbstractBuilder::name, JsonpDeserializer.stringDeserializer(), "name"); + op.add(AbstractBuilder::indices, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), + "indices"); op.add(AbstractBuilder::updatedAtMillis, JsonpDeserializer.longDeserializer(), "updated_at_millis"); + op.add(AbstractBuilder::analyticsCollectionName, JsonpDeserializer.stringDeserializer(), + "analytics_collection_name"); + op.add(AbstractBuilder::template, SearchApplicationTemplate._DESERIALIZER, "template"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplicationParameters.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/list/SearchApplicationListItem.java similarity index 58% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplicationParameters.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/list/SearchApplicationListItem.java index f8d57a04a..c75be3999 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/SearchApplicationParameters.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/search_application/list/SearchApplicationListItem.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package co.elastic.clients.elasticsearch.search_application; +package co.elastic.clients.elasticsearch.search_application.list; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; @@ -30,6 +30,7 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; +import java.lang.Long; import java.lang.String; import java.util.List; import java.util.Objects; @@ -51,41 +52,51 @@ // //---------------------------------------------------------------- -// typedef: search_application._types.SearchApplicationParameters +// typedef: search_application.list.SearchApplicationListItem /** * * @see API + * "../../doc-files/api-spec.html#search_application.list.SearchApplicationListItem">API * specification */ @JsonpDeserializable -public class SearchApplicationParameters implements JsonpSerializable { +public class SearchApplicationListItem implements JsonpSerializable { + private final String name; + private final List indices; - @Nullable - private final String analyticsCollectionName; + private final long updatedAtMillis; @Nullable - private final SearchApplicationTemplate template; + private final String analyticsCollectionName; // --------------------------------------------------------------------------------------------- - protected SearchApplicationParameters(AbstractBuilder builder) { + private SearchApplicationListItem(Builder builder) { + this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); this.indices = ApiTypeHelper.unmodifiableRequired(builder.indices, this, "indices"); + this.updatedAtMillis = ApiTypeHelper.requireNonNull(builder.updatedAtMillis, this, "updatedAtMillis"); this.analyticsCollectionName = builder.analyticsCollectionName; - this.template = builder.template; } - public static SearchApplicationParameters searchApplicationParametersOf( - Function> fn) { + public static SearchApplicationListItem of(Function> fn) { return fn.apply(new Builder()).build(); } /** - * Required - Indices that are part of the Search Application. + * Required - Search Application name + *

+ * API name: {@code name} + */ + public final String name() { + return this.name; + } + + /** + * Required - Indices that are part of the Search Application *

* API name: {@code indices} */ @@ -94,23 +105,22 @@ public final List<String> indices() { } /** - * Analytics collection associated to the Search Application. + * Required - Last time the Search Application was updated *

- * API name: {@code analytics_collection_name} + * API name: {@code updated_at_millis} */ - @Nullable - public final String analyticsCollectionName() { - return this.analyticsCollectionName; + public final long updatedAtMillis() { + return this.updatedAtMillis; } /** - * Search template to use on search operations. + * Analytics collection associated to the Search Application *

- * API name: {@code template} + * API name: {@code analytics_collection_name} */ @Nullable - public final SearchApplicationTemplate template() { - return this.template; + public final String analyticsCollectionName() { + return this.analyticsCollectionName; } /** @@ -124,6 +134,9 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + generator.writeKey("name"); + generator.write(this.name); + if (ApiTypeHelper.isDefined(this.indices)) { generator.writeKey("indices"); generator.writeStartArray(); @@ -134,16 +147,14 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } + generator.writeKey("updated_at_millis"); + generator.write(this.updatedAtMillis); + if (this.analyticsCollectionName != null) { generator.writeKey("analytics_collection_name"); generator.write(this.analyticsCollectionName); } - if (this.template != null) { - generator.writeKey("template"); - this.template.serialize(generator, mapper); - - } } @@ -155,115 +166,109 @@ public String toString() { // --------------------------------------------------------------------------------------------- /** - * Builder for {@link SearchApplicationParameters}. + * Builder for {@link SearchApplicationListItem}. */ - public static class Builder extends SearchApplicationParameters.AbstractBuilder + public static class Builder extends WithJsonObjectBuilderBase implements - ObjectBuilder { - @Override - protected Builder self() { - return this; - } - - /** - * Builds a {@link SearchApplicationParameters}. - * - * @throws NullPointerException - * if some of the required fields are null. - */ - public SearchApplicationParameters build() { - _checkSingleUse(); + ObjectBuilder { + private String name; - return new SearchApplicationParameters(this); - } - } - - public abstract static class AbstractBuilder> - extends - WithJsonObjectBuilderBase { private List indices; + private Long updatedAtMillis; + @Nullable private String analyticsCollectionName; - @Nullable - private SearchApplicationTemplate template; + /** + * Required - Search Application name + *

+ * API name: {@code name} + */ + public final Builder name(String value) { + this.name = value; + return this; + } /** - * Required - Indices that are part of the Search Application. + * Required - Indices that are part of the Search Application *

* API name: {@code indices} *

* Adds all elements of list to indices. */ - public final BuilderT indices(List list) { + public final Builder indices(List list) { this.indices = _listAddAll(this.indices, list); - return self(); + return this; } /** - * Required - Indices that are part of the Search Application. + * Required - Indices that are part of the Search Application *

* API name: {@code indices} *

* Adds one or more values to indices. */ - public final BuilderT indices(String value, String... values) { + public final Builder indices(String value, String... values) { this.indices = _listAdd(this.indices, value, values); - return self(); + return this; } /** - * Analytics collection associated to the Search Application. + * Required - Last time the Search Application was updated *

- * API name: {@code analytics_collection_name} + * API name: {@code updated_at_millis} */ - public final BuilderT analyticsCollectionName(@Nullable String value) { - this.analyticsCollectionName = value; - return self(); + public final Builder updatedAtMillis(long value) { + this.updatedAtMillis = value; + return this; } /** - * Search template to use on search operations. + * Analytics collection associated to the Search Application *

- * API name: {@code template} + * API name: {@code analytics_collection_name} */ - public final BuilderT template(@Nullable SearchApplicationTemplate value) { - this.template = value; - return self(); + public final Builder analyticsCollectionName(@Nullable String value) { + this.analyticsCollectionName = value; + return this; } - /** - * Search template to use on search operations. - *

- * API name: {@code template} - */ - public final BuilderT template( - Function> fn) { - return this.template(fn.apply(new SearchApplicationTemplate.Builder()).build()); + @Override + protected Builder self() { + return this; } - protected abstract BuilderT self(); + /** + * Builds a {@link SearchApplicationListItem}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public SearchApplicationListItem build() { + _checkSingleUse(); + return new SearchApplicationListItem(this); + } } // --------------------------------------------------------------------------------------------- /** - * Json deserializer for {@link SearchApplicationParameters} + * Json deserializer for {@link SearchApplicationListItem} */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer - .lazy(Builder::new, SearchApplicationParameters::setupSearchApplicationParametersDeserializer); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, SearchApplicationListItem::setupSearchApplicationListItemDeserializer); - protected static > void setupSearchApplicationParametersDeserializer( - ObjectDeserializer op) { + protected static void setupSearchApplicationListItemDeserializer( + ObjectDeserializer op) { - op.add(AbstractBuilder::indices, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), + op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); + op.add(Builder::indices, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "indices"); - op.add(AbstractBuilder::analyticsCollectionName, JsonpDeserializer.stringDeserializer(), - "analytics_collection_name"); - op.add(AbstractBuilder::template, SearchApplicationTemplate._DESERIALIZER, "template"); + op.add(Builder::updatedAtMillis, JsonpDeserializer.longDeserializer(), "updated_at_millis"); + op.add(Builder::analyticsCollectionName, JsonpDeserializer.stringDeserializer(), "analytics_collection_name"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java index 0d8d16b7c..c807e9642 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java @@ -58,8 +58,7 @@ // typedef: searchable_snapshots.cache_stats.Request /** - * Get cache statistics. Get statistics about the shared cache for partially - * mounted indices. + * Retrieve node-level cache statistics about searchable snapshots. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java index d5362ffb4..771e420e7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java @@ -59,8 +59,7 @@ // typedef: searchable_snapshots.clear_cache.Request /** - * Clear the cache. Clear indices and data streams from the shared cache for - * partially mounted indices. + * Clear the cache of searchable snapshots. 
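A minimal sketch of the cache-statistics and clear-cache endpoints whose summaries are rewritten above, assuming an already-configured ElasticsearchClient; "my-partial-index" is a placeholder index name.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.searchable_snapshots.CacheStatsResponse;

class SearchableSnapshotCacheSketch {
    void inspectAndClear(ElasticsearchClient client) throws Exception {
        // Per-node statistics for the shared snapshot cache.
        CacheStatsResponse stats = client.searchableSnapshots().cacheStats();
        System.out.println(stats.nodes().keySet());
        // Evict cached data for one index.
        client.searchableSnapshots().clearCache(c -> c.index("my-partial-index"));
    }
}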
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java index 338f1d307..175f5f222 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java @@ -71,8 +71,7 @@ public ElasticsearchSearchableSnapshotsAsyncClient withTransportOptions( // ----- Endpoint: searchable_snapshots.cache_stats /** - * Get cache statistics. Get statistics about the shared cache for partially - * mounted indices. + * Retrieve node-level cache statistics about searchable snapshots. * * @see Documentation @@ -87,8 +86,7 @@ public CompletableFuture cacheStats(CacheStatsRequest reques } /** - * Get cache statistics. Get statistics about the shared cache for partially - * mounted indices. + * Retrieve node-level cache statistics about searchable snapshots. * * @param fn * a function that initializes a builder to create the @@ -104,8 +102,7 @@ public final CompletableFuture cacheStats( } /** - * Get cache statistics. Get statistics about the shared cache for partially - * mounted indices. + * Retrieve node-level cache statistics about searchable snapshots. * * @see Documentation @@ -120,8 +117,7 @@ public CompletableFuture cacheStats() { // ----- Endpoint: searchable_snapshots.clear_cache /** - * Clear the cache. Clear indices and data streams from the shared cache for - * partially mounted indices. + * Clear the cache of searchable snapshots. * * @see Documentation @@ -136,8 +132,7 @@ public CompletableFuture clearCache(ClearCacheRequest reques } /** - * Clear the cache. Clear indices and data streams from the shared cache for - * partially mounted indices. + * Clear the cache of searchable snapshots. * * @param fn * a function that initializes a builder to create the @@ -153,8 +148,7 @@ public final CompletableFuture clearCache( } /** - * Clear the cache. Clear indices and data streams from the shared cache for - * partially mounted indices. + * Clear the cache of searchable snapshots. * * @see Documentation @@ -169,9 +163,7 @@ public CompletableFuture clearCache() { // ----- Endpoint: searchable_snapshots.mount /** - * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - * this API for snapshots managed by index lifecycle management (ILM). Manually - * mounting ILM-managed snapshots can interfere with ILM processes. + * Mount a snapshot as a searchable index. * * @see Documentation @@ -186,9 +178,7 @@ public CompletableFuture mount(MountRequest request) { } /** - * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - * this API for snapshots managed by index lifecycle management (ILM). Manually - * mounting ILM-managed snapshots can interfere with ILM processes. + * Mount a snapshot as a searchable index. * * @param fn * a function that initializes a builder to create the @@ -206,7 +196,7 @@ public final CompletableFuture mount( // ----- Endpoint: searchable_snapshots.stats /** - * Get searchable snapshot statistics. + * Retrieve shard-level statistics about searchable snapshots. * * @see Documentation @@ -221,7 +211,7 @@ public CompletableFuture stats(SearchableSnaps } /** - * Get searchable snapshot statistics. 
+ * Retrieve shard-level statistics about searchable snapshots. * * @param fn * a function that initializes a builder to create the @@ -237,7 +227,7 @@ public final CompletableFuture stats( } /** - * Get searchable snapshot statistics. + * Retrieve shard-level statistics about searchable snapshots. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java index 6f57e841e..3280a28f0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java @@ -71,8 +71,7 @@ public ElasticsearchSearchableSnapshotsClient withTransportOptions(@Nullable Tra // ----- Endpoint: searchable_snapshots.cache_stats /** - * Get cache statistics. Get statistics about the shared cache for partially - * mounted indices. + * Retrieve node-level cache statistics about searchable snapshots. * * @see Documentation @@ -87,8 +86,7 @@ public CacheStatsResponse cacheStats(CacheStatsRequest request) throws IOExcepti } /** - * Get cache statistics. Get statistics about the shared cache for partially - * mounted indices. + * Retrieve node-level cache statistics about searchable snapshots. * * @param fn * a function that initializes a builder to create the @@ -104,8 +102,7 @@ public final CacheStatsResponse cacheStats(FunctionDocumentation @@ -120,8 +117,7 @@ public CacheStatsResponse cacheStats() throws IOException, ElasticsearchExceptio // ----- Endpoint: searchable_snapshots.clear_cache /** - * Clear the cache. Clear indices and data streams from the shared cache for - * partially mounted indices. + * Clear the cache of searchable snapshots. * * @see Documentation @@ -136,8 +132,7 @@ public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOExcepti } /** - * Clear the cache. Clear indices and data streams from the shared cache for - * partially mounted indices. + * Clear the cache of searchable snapshots. * * @param fn * a function that initializes a builder to create the @@ -153,8 +148,7 @@ public final ClearCacheResponse clearCache(FunctionDocumentation @@ -169,9 +163,7 @@ public ClearCacheResponse clearCache() throws IOException, ElasticsearchExceptio // ----- Endpoint: searchable_snapshots.mount /** - * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - * this API for snapshots managed by index lifecycle management (ILM). Manually - * mounting ILM-managed snapshots can interfere with ILM processes. + * Mount a snapshot as a searchable index. * * @see Documentation @@ -186,9 +178,7 @@ public MountResponse mount(MountRequest request) throws IOException, Elasticsear } /** - * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - * this API for snapshots managed by index lifecycle management (ILM). Manually - * mounting ILM-managed snapshots can interfere with ILM processes. + * Mount a snapshot as a searchable index. * * @param fn * a function that initializes a builder to create the @@ -206,7 +196,7 @@ public final MountResponse mount(FunctionDocumentation @@ -222,7 +212,7 @@ public SearchableSnapshotsStatsResponse stats(SearchableSnapshotsStatsRequest re } /** - * Get searchable snapshot statistics. 
+ * Retrieve shard-level statistics about searchable snapshots. * * @param fn * a function that initializes a builder to create the @@ -239,7 +229,7 @@ public final SearchableSnapshotsStatsResponse stats( } /** - * Get searchable snapshot statistics. + * Retrieve shard-level statistics about searchable snapshots. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java index 1cd7ce354..aa2964a80 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java @@ -61,9 +61,7 @@ // typedef: searchable_snapshots.mount.Request /** - * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - * this API for snapshots managed by index lifecycle management (ILM). Manually - * mounting ILM-managed snapshots can interfere with ILM processes. + * Mount a snapshot as a searchable index. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java index 7b37b9e90..33630a60d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java @@ -57,7 +57,7 @@ // typedef: searchable_snapshots.stats.Request /** - * Get searchable snapshot statistics. + * Retrieve shard-level statistics about searchable snapshots. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java index b7345b790..82e2b73e3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityAsyncClient.java @@ -1140,7 +1140,9 @@ public CompletableFuture getPrivileges() { /** * Get roles. *
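A minimal sketch for the security get-role endpoint whose Javadocs are expanded in the surrounding hunks, assuming an already-configured ElasticsearchClient; "my_admin_role" is a placeholder for a role defined in the native realm (roles defined in roles files are not returned).

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.security.GetRoleResponse;

class GetRoleSketch {
    void printRole(ElasticsearchClient client) throws Exception {
        GetRoleResponse response = client.security().getRole(r -> r.name("my_admin_role"));
        // The response maps each requested role name to its definition.
        response.result().forEach((name, role) -> System.out.println(name + " -> " + role));
    }
}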

- * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @see Documentation @@ -1157,7 +1159,9 @@ public CompletableFuture getRole(GetRoleRequest request) { /** * Get roles. *

- * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @param fn * a function that initializes a builder to create the @@ -1175,7 +1179,9 @@ public final CompletableFuture getRole( /** * Get roles. *

- * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java index c15eedd89..ae97689ae 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/ElasticsearchSecurityClient.java @@ -1170,7 +1170,9 @@ public GetPrivilegesResponse getPrivileges() throws IOException, ElasticsearchEx /** * Get roles. *

- * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @see Documentation @@ -1187,7 +1189,9 @@ public GetRoleResponse getRole(GetRoleRequest request) throws IOException, Elast /** * Get roles. *

- * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @param fn * a function that initializes a builder to create the @@ -1205,7 +1209,9 @@ public final GetRoleResponse getRole(Function - * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/GetRoleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/GetRoleRequest.java index 96243ac01..aecf52d33 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/GetRoleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/GetRoleRequest.java @@ -60,7 +60,9 @@ /** * Get roles. *

- * Get roles in the native realm. + * Get roles in the native realm. The role management APIs are generally the + * preferred way to manage roles, rather than using file-based role management. + * The get roles API cannot retrieve roles that are defined in roles files. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/PutRoleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/PutRoleRequest.java index 59d1df3c5..6878b7af0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/security/PutRoleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/security/PutRoleRequest.java @@ -181,11 +181,7 @@ public final Map metadata() { } /** - * Required - The name of the role that is being created or updated. On - * Elasticsearch Serverless, the role name must begin with a letter or digit and - * can only contain letters, digits and the characters '_', '-', and '.'. Each - * role must have a unique name, as this will serve as the identifier for that - * role. + * Required - The name of the role. *

* API name: {@code name} */ @@ -566,11 +562,7 @@ public final Builder metadata(String key, JsonData value) { } /** - * Required - The name of the role that is being created or updated. On - * Elasticsearch Serverless, the role name must begin with a letter or digit and - * can only contain letters, digits and the characters '_', '-', and '.'. Each - * role must have a unique name, as this will serve as the identifier for that - * role. + * Required - The name of the role. *

* API name: {@code name} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java index 33686353f..cadea1b06 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java @@ -56,17 +56,8 @@ // typedef: shutdown.delete_node.Request /** - * Cancel node shutdown preparations. Remove a node from the shutdown list so it - * can resume normal operations. You must explicitly clear the shutdown request - * when a node rejoins the cluster or when a node has permanently left the - * cluster. Shutdown requests are never removed automatically by Elasticsearch. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. + * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS + * and ECK. Direct use is not supported. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java index c9c33f601..e32836ca0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java @@ -70,17 +70,8 @@ public ElasticsearchShutdownAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: shutdown.delete_node /** - * Cancel node shutdown preparations. Remove a node from the shutdown list so it - * can resume normal operations. You must explicitly clear the shutdown request - * when a node rejoins the cluster or when a node has permanently left the - * cluster. Shutdown requests are never removed automatically by Elasticsearch. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. + * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS + * and ECK. Direct use is not supported. * * @see Documentation @@ -95,17 +86,8 @@ public CompletableFuture deleteNode(DeleteNodeRequest reques } /** - * Cancel node shutdown preparations. Remove a node from the shutdown list so it - * can resume normal operations. You must explicitly clear the shutdown request - * when a node rejoins the cluster or when a node has permanently left the - * cluster. Shutdown requests are never removed automatically by Elasticsearch. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. + * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS + * and ECK. Direct use is not supported. * * @param fn * a function that initializes a builder to create the @@ -123,18 +105,9 @@ public final CompletableFuture deleteNode( // ----- Endpoint: shutdown.get_node /** - * Get the shutdown status. - *

- * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @see Documentation @@ -149,18 +122,9 @@ public CompletableFuture getNode(GetNodeRequest request) { } /** - * Get the shutdown status. - *

- * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @param fn * a function that initializes a builder to create the @@ -176,18 +140,9 @@ public final CompletableFuture getNode( } /** - * Get the shutdown status. - *

- * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @see Documentation @@ -202,26 +157,8 @@ public CompletableFuture getNode() { // ----- Endpoint: shutdown.put_node /** - * Prepare a node to be shut down. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. - *

- * The API migrates ongoing tasks and index shards to other nodes as needed to - * prepare a node to be restarted or shut down and removed from the cluster. - * This ensures that Elasticsearch can be stopped safely with minimal disruption - * to the cluster. - *

- * You must specify the type of shutdown: restart, - * remove, or replace. If a node is already being - * prepared for shutdown, you can use this API to change the shutdown type. - *

- * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - * node shutdown status to determine when it is safe to stop Elasticsearch. + * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. + * Direct use is not supported. * * @see Documentation @@ -236,26 +173,8 @@ public CompletableFuture putNode(PutNodeRequest request) { } /** - * Prepare a node to be shut down. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. - *

- * The API migrates ongoing tasks and index shards to other nodes as needed to - * prepare a node to be restarted or shut down and removed from the cluster. - * This ensures that Elasticsearch can be stopped safely with minimal disruption - * to the cluster. - *

- * You must specify the type of shutdown: restart, - * remove, or replace. If a node is already being - * prepared for shutdown, you can use this API to change the shutdown type. - *

- * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - * node shutdown status to determine when it is safe to stop Elasticsearch. + * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. + * Direct use is not supported. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java index 840334b97..d697c22ee 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java @@ -68,17 +68,8 @@ public ElasticsearchShutdownClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: shutdown.delete_node /** - * Cancel node shutdown preparations. Remove a node from the shutdown list so it - * can resume normal operations. You must explicitly clear the shutdown request - * when a node rejoins the cluster or when a node has permanently left the - * cluster. Shutdown requests are never removed automatically by Elasticsearch. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. + * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS + * and ECK. Direct use is not supported. * * @see Documentation @@ -93,17 +84,8 @@ public DeleteNodeResponse deleteNode(DeleteNodeRequest request) throws IOExcepti } /** - * Cancel node shutdown preparations. Remove a node from the shutdown list so it - * can resume normal operations. You must explicitly clear the shutdown request - * when a node rejoins the cluster or when a node has permanently left the - * cluster. Shutdown requests are never removed automatically by Elasticsearch. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. + * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS + * and ECK. Direct use is not supported. * * @param fn * a function that initializes a builder to create the @@ -121,18 +103,9 @@ public final DeleteNodeResponse deleteNode(Function - * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @see Documentation @@ -147,18 +120,9 @@ public GetNodeResponse getNode(GetNodeRequest request) throws IOException, Elast } /** - * Get the shutdown status. - *

- * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @param fn * a function that initializes a builder to create the @@ -174,18 +138,9 @@ public final GetNodeResponse getNode(Function - * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @see Documentation @@ -200,26 +155,8 @@ public GetNodeResponse getNode() throws IOException, ElasticsearchException { // ----- Endpoint: shutdown.put_node /** - * Prepare a node to be shut down. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. - *

- * The API migrates ongoing tasks and index shards to other nodes as needed to - * prepare a node to be restarted or shut down and removed from the cluster. - * This ensures that Elasticsearch can be stopped safely with minimal disruption - * to the cluster. - *

- * You must specify the type of shutdown: restart, - * remove, or replace. If a node is already being - * prepared for shutdown, you can use this API to change the shutdown type. - *

- * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - * node shutdown status to determine when it is safe to stop Elasticsearch. + * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. + * Direct use is not supported. * * @see Documentation @@ -234,26 +171,8 @@ public PutNodeResponse putNode(PutNodeRequest request) throws IOException, Elast } /** - * Prepare a node to be shut down. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. - *

- * The API migrates ongoing tasks and index shards to other nodes as needed to - * prepare a node to be restarted or shut down and removed from the cluster. - * This ensures that Elasticsearch can be stopped safely with minimal disruption - * to the cluster. - *

- * You must specify the type of shutdown: restart, - * remove, or replace. If a node is already being - * prepared for shutdown, you can use this API to change the shutdown type. - *

- * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - * node shutdown status to determine when it is safe to stop Elasticsearch. + * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. + * Direct use is not supported. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java index 1c3dee599..42ee075c2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java @@ -58,18 +58,9 @@ // typedef: shutdown.get_node.Request /** - * Get the shutdown status. - *

- * Get information about nodes that are ready to be shut down, have shut down - * preparations still in progress, or have stalled. The API returns status - * information for each part of the shut down process. - *

- * NOTE: This feature is designed for indirect use by Elasticsearch Service, - * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * Retrieve status of a node or nodes that are currently marked as shutting + * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java index af64a69ab..0eaa0a97b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java @@ -58,26 +58,8 @@ // typedef: shutdown.put_node.Request /** - * Prepare a node to be shut down. - *

- * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic - * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not - * supported. - *

- * If the operator privileges feature is enabled, you must be an operator to use - * this API. - *

- * The API migrates ongoing tasks and index shards to other nodes as needed to - * prepare a node to be restarted or shut down and removed from the cluster. - * This ensures that Elasticsearch can be stopped safely with minimal disruption - * to the cluster. - *

- * You must specify the type of shutdown: restart, - * remove, or replace. If a node is already being - * prepared for shutdown, you can use this API to change the shutdown type. - *

- * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - * node shutdown status to determine when it is safe to stop Elasticsearch. + * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. + * Direct use is not supported. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java index c49fcccfc..ef16269fb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java @@ -56,9 +56,7 @@ // typedef: slm.delete_lifecycle.Request /** - * Delete a policy. Delete a snapshot lifecycle policy definition. This - * operation prevents any future snapshots from being taken but does not cancel - * in-progress snapshots or remove previously-taken snapshots. + * Deletes an existing snapshot lifecycle policy. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java index 85e081f42..43a51bc51 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java @@ -67,9 +67,7 @@ public ElasticsearchSlmAsyncClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: slm.delete_lifecycle /** - * Delete a policy. Delete a snapshot lifecycle policy definition. This - * operation prevents any future snapshots from being taken but does not cancel - * in-progress snapshots or remove previously-taken snapshots. + * Deletes an existing snapshot lifecycle policy. * * @see Documentation @@ -84,9 +82,7 @@ public CompletableFuture deleteLifecycle(DeleteLifecycl } /** - * Delete a policy. Delete a snapshot lifecycle policy definition. This - * operation prevents any future snapshots from being taken but does not cancel - * in-progress snapshots or remove previously-taken snapshots. + * Deletes an existing snapshot lifecycle policy. * * @param fn * a function that initializes a builder to create the @@ -104,10 +100,8 @@ public final CompletableFuture deleteLifecycle( // ----- Endpoint: slm.execute_lifecycle /** - * Run a policy. Immediately create a snapshot according to the snapshot - * lifecycle policy without waiting for the scheduled time. The snapshot policy - * is normally applied according to its schedule, but you might want to manually - * run a policy before performing an upgrade or other maintenance. + * Immediately creates a snapshot according to the lifecycle policy, without + * waiting for the scheduled time. * * @see Documentation @@ -122,10 +116,8 @@ public CompletableFuture executeLifecycle(ExecuteLifec } /** - * Run a policy. Immediately create a snapshot according to the snapshot - * lifecycle policy without waiting for the scheduled time. The snapshot policy - * is normally applied according to its schedule, but you might want to manually - * run a policy before performing an upgrade or other maintenance. + * Immediately creates a snapshot according to the lifecycle policy, without + * waiting for the scheduled time. 
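A minimal sketch for slm.execute_lifecycle as summarized above, assuming an already-configured ElasticsearchClient; "nightly-snapshots" is a placeholder policy id.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.slm.ExecuteLifecycleResponse;

class ExecuteSlmPolicySketch {
    void snapshotNow(ElasticsearchClient client) throws Exception {
        ExecuteLifecycleResponse response = client.slm().executeLifecycle(e -> e.policyId("nightly-snapshots"));
        // The API returns the name of the snapshot that was started.
        System.out.println("Started snapshot: " + response.snapshotName());
    }
}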
* * @param fn * a function that initializes a builder to create the @@ -143,10 +135,8 @@ public final CompletableFuture executeLifecycle( // ----- Endpoint: slm.execute_retention /** - * Run a retention policy. Manually apply the retention policy to force - * immediate removal of snapshots that are expired according to the snapshot - * lifecycle policy retention rules. The retention policy is normally applied - * according to its schedule. + * Deletes any snapshots that are expired according to the policy's retention + * rules. * * @see Documentation @@ -160,8 +150,8 @@ public CompletableFuture executeRetention() { // ----- Endpoint: slm.get_lifecycle /** - * Get policy information. Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @see Documentation @@ -176,8 +166,8 @@ public CompletableFuture getLifecycle(GetLifecycleRequest } /** - * Get policy information. Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @param fn * a function that initializes a builder to create the @@ -193,8 +183,8 @@ public final CompletableFuture getLifecycle( } /** - * Get policy information. Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @see Documentation @@ -209,8 +199,8 @@ public CompletableFuture getLifecycle() { // ----- Endpoint: slm.get_stats /** - * Get snapshot lifecycle management statistics. Get global and policy-level - * statistics about actions taken by snapshot lifecycle management. + * Returns global and policy-level statistics about actions taken by snapshot + * lifecycle management. * * @see Documentation @@ -224,7 +214,7 @@ public CompletableFuture getStats() { // ----- Endpoint: slm.get_status /** - * Get the snapshot lifecycle management status. + * Retrieves the status of snapshot lifecycle management (SLM). * * @see Documentation @@ -238,9 +228,7 @@ public CompletableFuture getStatus() { // ----- Endpoint: slm.put_lifecycle /** - * Create or update a policy. Create or update a snapshot lifecycle policy. If - * the policy already exists, this request increments the policy version. Only - * the latest version of a policy is stored. + * Creates or updates a snapshot lifecycle policy. * * @see Documentation @@ -255,9 +243,7 @@ public CompletableFuture putLifecycle(PutLifecycleRequest } /** - * Create or update a policy. Create or update a snapshot lifecycle policy. If - * the policy already exists, this request increments the policy version. Only - * the latest version of a policy is stored. + * Creates or updates a snapshot lifecycle policy. * * @param fn * a function that initializes a builder to create the @@ -275,9 +261,7 @@ public final CompletableFuture putLifecycle( // ----- Endpoint: slm.start /** - * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) - * starts automatically when a cluster is formed. Manually starting SLM is - * necessary only if it has been stopped using the stop SLM API. + * Turns on snapshot lifecycle management (SLM). 
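// Illustrative sketch, not part of the diff: the no-argument SLM calls documented above
// (start, executeRetention, getStats, getStatus) invoked from the async client. Assumes
// an already configured ElasticsearchAsyncClient.
import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;

class SlmHousekeepingSketch {
    static void run(ElasticsearchAsyncClient client) {
        client.slm().start().thenRun(() -> System.out.println("SLM running"));
        client.slm().executeRetention().thenRun(() -> System.out.println("retention triggered"));
        client.slm().getStats().thenAccept(stats -> System.out.println(stats));
        client.slm().getStatus().thenAccept(status -> System.out.println(status));
    }
}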
* * @see Documentation @@ -291,17 +275,7 @@ public CompletableFuture start() { // ----- Endpoint: slm.stop /** - * Stop snapshot lifecycle management. Stop all snapshot lifecycle management - * (SLM) operations and the SLM plugin. This API is useful when you are - * performing maintenance on a cluster and need to prevent SLM from performing - * any actions on your data streams or indices. Stopping SLM does not stop any - * snapshots that are in progress. You can manually trigger snapshots with the - * run snapshot lifecycle policy API even if SLM is stopped. - *

- * The API returns a response as soon as the request is acknowledged, but the - * plugin might continue to run until in-progress operations complete and it can - * be safely stopped. Use the get snapshot lifecycle management status API to - * see if SLM is running. + * Turns off snapshot lifecycle management (SLM). * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java index ca233fa6d..a6cd5188e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java @@ -68,9 +68,7 @@ public ElasticsearchSlmClient withTransportOptions(@Nullable TransportOptions tr // ----- Endpoint: slm.delete_lifecycle /** - * Delete a policy. Delete a snapshot lifecycle policy definition. This - * operation prevents any future snapshots from being taken but does not cancel - * in-progress snapshots or remove previously-taken snapshots. + * Deletes an existing snapshot lifecycle policy. * * @see Documentation @@ -86,9 +84,7 @@ public DeleteLifecycleResponse deleteLifecycle(DeleteLifecycleRequest request) } /** - * Delete a policy. Delete a snapshot lifecycle policy definition. This - * operation prevents any future snapshots from being taken but does not cancel - * in-progress snapshots or remove previously-taken snapshots. + * Deletes an existing snapshot lifecycle policy. * * @param fn * a function that initializes a builder to create the @@ -107,10 +103,8 @@ public final DeleteLifecycleResponse deleteLifecycle( // ----- Endpoint: slm.execute_lifecycle /** - * Run a policy. Immediately create a snapshot according to the snapshot - * lifecycle policy without waiting for the scheduled time. The snapshot policy - * is normally applied according to its schedule, but you might want to manually - * run a policy before performing an upgrade or other maintenance. + * Immediately creates a snapshot according to the lifecycle policy, without + * waiting for the scheduled time. * * @see Documentation @@ -126,10 +120,8 @@ public ExecuteLifecycleResponse executeLifecycle(ExecuteLifecycleRequest request } /** - * Run a policy. Immediately create a snapshot according to the snapshot - * lifecycle policy without waiting for the scheduled time. The snapshot policy - * is normally applied according to its schedule, but you might want to manually - * run a policy before performing an upgrade or other maintenance. + * Immediately creates a snapshot according to the lifecycle policy, without + * waiting for the scheduled time. * * @param fn * a function that initializes a builder to create the @@ -148,10 +140,8 @@ public final ExecuteLifecycleResponse executeLifecycle( // ----- Endpoint: slm.execute_retention /** - * Run a retention policy. Manually apply the retention policy to force - * immediate removal of snapshots that are expired according to the snapshot - * lifecycle policy retention rules. The retention policy is normally applied - * according to its schedule. + * Deletes any snapshots that are expired according to the policy's retention + * rules. * * @see Documentation @@ -165,8 +155,8 @@ public ExecuteRetentionResponse executeRetention() throws IOException, Elasticse // ----- Endpoint: slm.get_lifecycle /** - * Get policy information. 
Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @see Documentation @@ -181,8 +171,8 @@ public GetLifecycleResponse getLifecycle(GetLifecycleRequest request) throws IOE } /** - * Get policy information. Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @param fn * a function that initializes a builder to create the @@ -199,8 +189,8 @@ public final GetLifecycleResponse getLifecycle( } /** - * Get policy information. Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @see Documentation @@ -215,8 +205,8 @@ public GetLifecycleResponse getLifecycle() throws IOException, ElasticsearchExce // ----- Endpoint: slm.get_stats /** - * Get snapshot lifecycle management statistics. Get global and policy-level - * statistics about actions taken by snapshot lifecycle management. + * Returns global and policy-level statistics about actions taken by snapshot + * lifecycle management. * * @see Documentation @@ -230,7 +220,7 @@ public GetStatsResponse getStats() throws IOException, ElasticsearchException { // ----- Endpoint: slm.get_status /** - * Get the snapshot lifecycle management status. + * Retrieves the status of snapshot lifecycle management (SLM). * * @see Documentation @@ -244,9 +234,7 @@ public GetSlmStatusResponse getStatus() throws IOException, ElasticsearchExcepti // ----- Endpoint: slm.put_lifecycle /** - * Create or update a policy. Create or update a snapshot lifecycle policy. If - * the policy already exists, this request increments the policy version. Only - * the latest version of a policy is stored. + * Creates or updates a snapshot lifecycle policy. * * @see Documentation @@ -261,9 +249,7 @@ public PutLifecycleResponse putLifecycle(PutLifecycleRequest request) throws IOE } /** - * Create or update a policy. Create or update a snapshot lifecycle policy. If - * the policy already exists, this request increments the policy version. Only - * the latest version of a policy is stored. + * Creates or updates a snapshot lifecycle policy. * * @param fn * a function that initializes a builder to create the @@ -282,9 +268,7 @@ public final PutLifecycleResponse putLifecycle( // ----- Endpoint: slm.start /** - * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) - * starts automatically when a cluster is formed. Manually starting SLM is - * necessary only if it has been stopped using the stop SLM API. + * Turns on snapshot lifecycle management (SLM). * * @see Documentation @@ -298,17 +282,7 @@ public StartSlmResponse start() throws IOException, ElasticsearchException { // ----- Endpoint: slm.stop /** - * Stop snapshot lifecycle management. Stop all snapshot lifecycle management - * (SLM) operations and the SLM plugin. This API is useful when you are - * performing maintenance on a cluster and need to prevent SLM from performing - * any actions on your data streams or indices. Stopping SLM does not stop any - * snapshots that are in progress. You can manually trigger snapshots with the - * run snapshot lifecycle policy API even if SLM is stopped. - *

- * The API returns a response as soon as the request is acknowledged, but the - * plugin might continue to run until in-progress operations complete and it can - * be safely stopped. Use the get snapshot lifecycle management status API to - * see if SLM is running. + * Turns off snapshot lifecycle management (SLM). * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java index abda3524d..fd0983d7e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java @@ -56,10 +56,8 @@ // typedef: slm.execute_lifecycle.Request /** - * Run a policy. Immediately create a snapshot according to the snapshot - * lifecycle policy without waiting for the scheduled time. The snapshot policy - * is normally applied according to its schedule, but you might want to manually - * run a policy before performing an upgrade or other maintenance. + * Immediately creates a snapshot according to the lifecycle policy, without + * waiting for the scheduled time. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java index 9755c009d..57ec5c733 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java @@ -50,10 +50,8 @@ // typedef: slm.execute_retention.Request /** - * Run a retention policy. Manually apply the retention policy to force - * immediate removal of snapshots that are expired according to the snapshot - * lifecycle policy retention rules. The retention policy is normally applied - * according to its schedule. + * Deletes any snapshots that are expired according to the policy's retention + * rules. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java index f7b866993..6df555883 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java @@ -58,8 +58,8 @@ // typedef: slm.get_lifecycle.Request /** - * Get policy information. Get snapshot lifecycle policy definitions and - * information about the latest snapshot attempts. + * Retrieves one or more snapshot lifecycle policy definitions and information + * about the latest snapshot attempts. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java index fe707abbf..d575fbbe7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java @@ -50,7 +50,7 @@ // typedef: slm.get_status.Request /** - * Get the snapshot lifecycle management status. + * Retrieves the status of snapshot lifecycle management (SLM). 
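// Illustrative sketch, not part of the diff: checking the SLM status and stopping SLM
// with the blocking client, mirroring the getStatus()/stop() endpoints documented above.
// Assumes a configured ElasticsearchClient; responses are printed via their JSON toString().
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import java.io.IOException;

class SlmPauseSketch {
    static void pauseSlm(ElasticsearchClient client) throws IOException {
        System.out.println("before: " + client.slm().getStatus());
        client.slm().stop(); // acknowledged as soon as the request is accepted
        System.out.println("after: " + client.slm().getStatus());
    }
}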
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java index bd7b30c49..9fd0ed656 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java @@ -50,8 +50,8 @@ // typedef: slm.get_stats.Request /** - * Get snapshot lifecycle management statistics. Get global and policy-level - * statistics about actions taken by snapshot lifecycle management. + * Returns global and policy-level statistics about actions taken by snapshot + * lifecycle management. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java index 5f1803804..ad68597ae 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java @@ -58,9 +58,7 @@ // typedef: slm.put_lifecycle.Request /** - * Create or update a policy. Create or update a snapshot lifecycle policy. If - * the policy already exists, this request increments the policy version. Only - * the latest version of a policy is stored. + * Creates or updates a snapshot lifecycle policy. * * @see API * specification @@ -143,8 +141,7 @@ public final String name() { } /** - * Required - The identifier for the snapshot lifecycle policy you want to - * create or update. + * Required - ID for the snapshot lifecycle policy you want to create or update. *

* API name: {@code policy_id} */ @@ -320,8 +317,7 @@ public final Builder name(@Nullable String value) { } /** - * Required - The identifier for the snapshot lifecycle policy you want to - * create or update. + * Required - ID for the snapshot lifecycle policy you want to create or update. *

* API name: {@code policy_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java index c6ae7dcd9..2be9ce3f7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java @@ -50,9 +50,7 @@ // typedef: slm.start.Request /** - * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) - * starts automatically when a cluster is formed. Manually starting SLM is - * necessary only if it has been stopped using the stop SLM API. + * Turns on snapshot lifecycle management (SLM). * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java index 89925a268..bc7a32f73 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java @@ -50,17 +50,7 @@ // typedef: slm.stop.Request /** - * Stop snapshot lifecycle management. Stop all snapshot lifecycle management - * (SLM) operations and the SLM plugin. This API is useful when you are - * performing maintenance on a cluster and need to prevent SLM from performing - * any actions on your data streams or indices. Stopping SLM does not stop any - * snapshots that are in progress. You can manually trigger snapshots with the - * run snapshot lifecycle policy API even if SLM is stopped. - *
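// Illustrative sketch, not part of the diff: creating a policy through putLifecycle, the
// client-side counterpart of the PutLifecycleRequest changes above. Only the name(...)
// builder property is visible in this diff; policyId(...), schedule(...), repository(...)
// and config(...) are assumed properties derived from the REST body, and every literal
// value is an example.
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import java.io.IOException;

class PutLifecycleSketch {
    static void createPolicy(ElasticsearchClient client) throws IOException {
        client.slm().putLifecycle(p -> p
                .policyId("daily-snapshots")      // path parameter policy_id
                .name("<daily-snap-{now/d}>")     // snapshot name pattern
                .schedule("0 30 1 * * ?")         // run every day at 01:30
                .repository("my_repository")
                .config(c -> c.indices("data-*")));
    }
}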

- * The API returns a response as soon as the request is acknowledged, but the - * plugin might continue to run until in-progress operations complete and it can - * be safely stopped. Use the get snapshot lifecycle management status API to - * see if SLM is running. + * Turns off snapshot lifecycle management (SLM). * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java index 759d3157a..5626be8bd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java @@ -56,9 +56,8 @@ // typedef: snapshot.cleanup_repository.Request /** - * Clean up the snapshot repository. Trigger the review of the contents of a - * snapshot repository and delete any stale data not referenced by existing - * snapshots. + * Triggers the review of a snapshot repository’s contents and deletes any stale + * data not referenced by existing snapshots. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java index ab5a8ea0b..d23514cb8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java @@ -58,8 +58,8 @@ // typedef: snapshot.clone.Request /** - * Clone a snapshot. Clone part of all of a snapshot into another snapshot in - * the same repository. + * Clones indices from one snapshot into another snapshot in the same + * repository. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java index 86eeca000..0bb69501c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java @@ -60,13 +60,7 @@ // typedef: snapshot.create_repository.Request /** - * Create or update a snapshot repository. IMPORTANT: If you are migrating - * searchable snapshots, the repository name must be identical in the source and - * destination clusters. To register a snapshot repository, the cluster's global - * metadata must be writeable. Ensure there are no cluster blocks (for example, - * cluster.blocks.read_only and - * clsuter.blocks.read_only_allow_delete settings) that prevent - * write access. + * Creates a repository. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java index fc6be7a48..5a415b198 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java @@ -61,8 +61,7 @@ // typedef: snapshot.create.Request /** - * Create a snapshot. Take a snapshot of a cluster or of data streams and - * indices. + * Creates a snapshot in a repository. 
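// Illustrative sketch, not part of the diff: building the CreateSnapshotRequest above
// explicitly and passing it to the snapshot client. The repository(...), snapshot(...)
// and waitForCompletion(...) builder properties are assumptions matching the REST
// parameters; the names are examples only.
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.snapshot.CreateSnapshotRequest;
import java.io.IOException;

class CreateSnapshotSketch {
    static void snapshotNow(ElasticsearchClient client) throws IOException {
        CreateSnapshotRequest request = CreateSnapshotRequest.of(r -> r
                .repository("my_repository")
                .snapshot("snapshot_2025_01_01")
                .waitForCompletion(true)); // block until the snapshot finishes
        System.out.println(client.snapshot().create(request));
    }
}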
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java index 7521c6549..c8f2587ea 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java @@ -58,10 +58,7 @@ // typedef: snapshot.delete_repository.Request /** - * Delete snapshot repositories. When a repository is unregistered, - * Elasticsearch removes only the reference to the location where the repository - * is storing the snapshots. The snapshots themselves are left untouched and in - * place. + * Deletes a repository. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java index cfd12fe6b..fc29ad635 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java @@ -56,7 +56,7 @@ // typedef: snapshot.delete.Request /** - * Delete snapshots. + * Deletes one or more snapshots. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java index 5d1708b5f..0b5bdfaca 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java @@ -70,9 +70,8 @@ public ElasticsearchSnapshotAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: snapshot.cleanup_repository /** - * Clean up the snapshot repository. Trigger the review of the contents of a - * snapshot repository and delete any stale data not referenced by existing - * snapshots. + * Triggers the review of a snapshot repository’s contents and deletes any stale + * data not referenced by existing snapshots. * * @see Documentation @@ -87,9 +86,8 @@ public CompletableFuture cleanupRepository(CleanupRep } /** - * Clean up the snapshot repository. Trigger the review of the contents of a - * snapshot repository and delete any stale data not referenced by existing - * snapshots. + * Triggers the review of a snapshot repository’s contents and deletes any stale + * data not referenced by existing snapshots. * * @param fn * a function that initializes a builder to create the @@ -107,8 +105,8 @@ public final CompletableFuture cleanupRepository( // ----- Endpoint: snapshot.clone /** - * Clone a snapshot. Clone part of all of a snapshot into another snapshot in - * the same repository. + * Clones indices from one snapshot into another snapshot in the same + * repository. * * @see Documentation @@ -123,8 +121,8 @@ public CompletableFuture clone(CloneSnapshotRequest reque } /** - * Clone a snapshot. Clone part of all of a snapshot into another snapshot in - * the same repository. + * Clones indices from one snapshot into another snapshot in the same + * repository. 
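// Illustrative sketch, not part of the diff: cloning indices from an existing snapshot
// into a new one with the async client, as described above. The targetSnapshot(...) and
// indices(...) builder properties are assumptions based on the REST parameters.
import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;

class CloneSnapshotSketch {
    static void cloneLogsOnly(ElasticsearchAsyncClient client) {
        client.snapshot().clone(c -> c
                .repository("my_repository")
                .snapshot("snapshot_2025_01_01")
                .targetSnapshot("snapshot_2025_01_01_logs")
                .indices("logs-*"))
            .thenRun(() -> System.out.println("clone accepted"));
    }
}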
* * @param fn * a function that initializes a builder to create the @@ -142,8 +140,7 @@ public final CompletableFuture clone( // ----- Endpoint: snapshot.create /** - * Create a snapshot. Take a snapshot of a cluster or of data streams and - * indices. + * Creates a snapshot in a repository. * * @see Documentation @@ -158,8 +155,7 @@ public CompletableFuture create(CreateSnapshotRequest re } /** - * Create a snapshot. Take a snapshot of a cluster or of data streams and - * indices. + * Creates a snapshot in a repository. * * @param fn * a function that initializes a builder to create the @@ -177,13 +173,7 @@ public final CompletableFuture create( // ----- Endpoint: snapshot.create_repository /** - * Create or update a snapshot repository. IMPORTANT: If you are migrating - * searchable snapshots, the repository name must be identical in the source and - * destination clusters. To register a snapshot repository, the cluster's global - * metadata must be writeable. Ensure there are no cluster blocks (for example, - * cluster.blocks.read_only and - * clsuter.blocks.read_only_allow_delete settings) that prevent - * write access. + * Creates a repository. * * @see Documentation @@ -198,13 +188,7 @@ public CompletableFuture createRepository(CreateReposi } /** - * Create or update a snapshot repository. IMPORTANT: If you are migrating - * searchable snapshots, the repository name must be identical in the source and - * destination clusters. To register a snapshot repository, the cluster's global - * metadata must be writeable. Ensure there are no cluster blocks (for example, - * cluster.blocks.read_only and - * clsuter.blocks.read_only_allow_delete settings) that prevent - * write access. + * Creates a repository. * * @param fn * a function that initializes a builder to create the @@ -222,7 +206,7 @@ public final CompletableFuture createRepository( // ----- Endpoint: snapshot.delete /** - * Delete snapshots. + * Deletes one or more snapshots. * * @see Documentation @@ -237,7 +221,7 @@ public CompletableFuture delete(DeleteSnapshotRequest re } /** - * Delete snapshots. + * Deletes one or more snapshots. * * @param fn * a function that initializes a builder to create the @@ -255,10 +239,7 @@ public final CompletableFuture delete( // ----- Endpoint: snapshot.delete_repository /** - * Delete snapshot repositories. When a repository is unregistered, - * Elasticsearch removes only the reference to the location where the repository - * is storing the snapshots. The snapshots themselves are left untouched and in - * place. + * Deletes a repository. * * @see Documentation @@ -273,10 +254,7 @@ public CompletableFuture deleteRepository(DeleteReposi } /** - * Delete snapshot repositories. When a repository is unregistered, - * Elasticsearch removes only the reference to the location where the repository - * is storing the snapshots. The snapshots themselves are left untouched and in - * place. + * Deletes a repository. * * @param fn * a function that initializes a builder to create the @@ -294,7 +272,7 @@ public final CompletableFuture deleteRepository( // ----- Endpoint: snapshot.get /** - * Get snapshot information. + * Returns information about a snapshot. * * @see Documentation @@ -309,7 +287,7 @@ public CompletableFuture get(GetSnapshotRequest request) { } /** - * Get snapshot information. + * Returns information about a snapshot. 
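// Illustrative sketch, not part of the diff: looking up a snapshot and then deleting it
// with the async client methods documented above. Builder property names are assumptions
// based on the repository/snapshot REST parameters.
import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;

class SnapshotCleanupSketch {
    static void inspectAndDelete(ElasticsearchAsyncClient client) {
        client.snapshot().get(g -> g.repository("my_repository").snapshot("snapshot_2025_01_01"))
                .thenAccept(resp -> System.out.println(resp))
                .thenCompose(done -> client.snapshot()
                        .delete(d -> d.repository("my_repository").snapshot("snapshot_2025_01_01")))
                .thenRun(() -> System.out.println("snapshot deleted"));
    }
}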
* * @param fn * a function that initializes a builder to create the @@ -327,7 +305,7 @@ public final CompletableFuture get( // ----- Endpoint: snapshot.get_repository /** - * Get snapshot repository information. + * Returns information about a repository. * * @see Documentation @@ -342,7 +320,7 @@ public CompletableFuture getRepository(GetRepositoryReque } /** - * Get snapshot repository information. + * Returns information about a repository. * * @param fn * a function that initializes a builder to create the @@ -358,7 +336,7 @@ public final CompletableFuture getRepository( } /** - * Get snapshot repository information. + * Returns information about a repository. * * @see Documentation @@ -373,56 +351,7 @@ public CompletableFuture getRepository() { // ----- Endpoint: snapshot.repository_verify_integrity /** - * Verify the repository integrity. Verify the integrity of the contents of a - * snapshot repository. - *

- * This API enables you to perform a comprehensive check of the contents of a - * repository, looking for any anomalies in its data or metadata which might - * prevent you from restoring snapshots from the repository or which might cause - * future snapshot create or delete operations to fail. - *

- * If you suspect the integrity of the contents of one of your snapshot - * repositories, cease all write activity to this repository immediately, set - * its read_only option to true, and use this API to - * verify its integrity. Until you do so: - *

- *

- * If the API finds any problems with the integrity of the contents of your - * repository, Elasticsearch will not be able to repair the damage. The only way - * to bring the repository back into a fully working state after its contents - * have been damaged is by restoring its contents from a repository backup which - * was taken before the damage occurred. You must also identify what caused the - * damage and take action to prevent it from happening again. - *

- * If you cannot restore a repository backup, register a new repository and use - * this for all future snapshot operations. In some cases it may be possible to - * recover some of the contents of a damaged repository, either by restoring as - * many of its snapshots as needed and taking new snapshots of the restored - * data, or by using the reindex API to copy data from any searchable snapshots - * mounted from the damaged repository. - *

- * Avoid all operations which write to the repository while the verify - * repository integrity API is running. If something changes the repository - * contents while an integrity verification is running then Elasticsearch may - * incorrectly report having detected some anomalies in its contents due to the - * concurrent writes. It may also incorrectly fail to report some anomalies that - * the concurrent writes prevented it from detecting. - *

- * NOTE: This API is intended for exploratory use by humans. You should expect - * the request parameters and the response format to vary in future versions. - *

- * NOTE: This API may not work correctly in a mixed-version cluster. + * Verifies the integrity of the contents of a snapshot repository * * @see Documentation @@ -438,56 +367,7 @@ public CompletableFuture repositoryVerifyInte } /** - * Verify the repository integrity. Verify the integrity of the contents of a - * snapshot repository. - *

- * This API enables you to perform a comprehensive check of the contents of a - * repository, looking for any anomalies in its data or metadata which might - * prevent you from restoring snapshots from the repository or which might cause - * future snapshot create or delete operations to fail. - *

- * If you suspect the integrity of the contents of one of your snapshot - * repositories, cease all write activity to this repository immediately, set - * its read_only option to true, and use this API to - * verify its integrity. Until you do so: - *

- *

- * If the API finds any problems with the integrity of the contents of your - * repository, Elasticsearch will not be able to repair the damage. The only way - * to bring the repository back into a fully working state after its contents - * have been damaged is by restoring its contents from a repository backup which - * was taken before the damage occurred. You must also identify what caused the - * damage and take action to prevent it from happening again. - *

- * If you cannot restore a repository backup, register a new repository and use - * this for all future snapshot operations. In some cases it may be possible to - * recover some of the contents of a damaged repository, either by restoring as - * many of its snapshots as needed and taking new snapshots of the restored - * data, or by using the reindex API to copy data from any searchable snapshots - * mounted from the damaged repository. - *

- * Avoid all operations which write to the repository while the verify - * repository integrity API is running. If something changes the repository - * contents while an integrity verification is running then Elasticsearch may - * incorrectly report having detected some anomalies in its contents due to the - * concurrent writes. It may also incorrectly fail to report some anomalies that - * the concurrent writes prevented it from detecting. - *

- * NOTE: This API is intended for exploratory use by humans. You should expect - * the request parameters and the response format to vary in future versions. - *

- * NOTE: This API may not work correctly in a mixed-version cluster. + * Verifies the integrity of the contents of a snapshot repository * * @param fn * a function that initializes a builder to create the @@ -505,32 +385,7 @@ public final CompletableFuture repositoryVeri // ----- Endpoint: snapshot.restore /** - * Restore a snapshot. Restore a snapshot of a cluster or data streams and - * indices. - *

- * You can restore a snapshot only to a running cluster with an elected master - * node. The snapshot repository must be registered and available to the - * cluster. The snapshot and cluster versions must be compatible. - *

- * To restore a snapshot, the cluster's global metadata must be writable. Ensure - * there are't any cluster blocks that prevent writes. The restore operation - * ignores index blocks. - *

- * Before you restore a data stream, ensure the cluster contains a matching - * index template with data streams enabled. To check, use the index management - * feature in Kibana or the get index template API: - * - *

-	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
-	 * 
-	 * 
- *

- * If no such template exists, you can create one or restore a cluster state - * that contains one. Without a matching index template, a data stream can't - * roll over or create backing indices. - *

- * If your snapshot contains data from App Search or Workplace Search, you must - * restore the Enterprise Search encryption key before you restore the snapshot. + * Restores a snapshot. * * @see Documentation @@ -545,32 +400,7 @@ public CompletableFuture restore(RestoreRequest request) { } /** - * Restore a snapshot. Restore a snapshot of a cluster or data streams and - * indices. - *

- * You can restore a snapshot only to a running cluster with an elected master - * node. The snapshot repository must be registered and available to the - * cluster. The snapshot and cluster versions must be compatible. - *

- * To restore a snapshot, the cluster's global metadata must be writable. Ensure - * there are't any cluster blocks that prevent writes. The restore operation - * ignores index blocks. - *

- * Before you restore a data stream, ensure the cluster contains a matching - * index template with data streams enabled. To check, use the index management - * feature in Kibana or the get index template API: - * - *

-	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
-	 * 
-	 * 
- *

- * If no such template exists, you can create one or restore a cluster state - * that contains one. Without a matching index template, a data stream can't - * roll over or create backing indices. - *

- * If your snapshot contains data from App Search or Workplace Search, you must - * restore the Enterprise Search encryption key before you restore the snapshot. + * Restores a snapshot. * * @param fn * a function that initializes a builder to create the @@ -588,21 +418,7 @@ public final CompletableFuture restore( // ----- Endpoint: snapshot.status /** - * Get the snapshot status. Get a detailed description of the current state for - * each shard participating in the snapshot. Note that this API should be used - * only to obtain detailed shard-level information for ongoing snapshots. If - * this detail is not needed or you want to obtain information about one or more - * existing snapshots, use the get snapshot API. - *
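// Illustrative sketch, not part of the diff: restoring selected indices from a snapshot,
// matching the restore(...) methods in this hunk. The indices(...) and
// waitForCompletion(...) builder properties are assumptions; all names are examples.
import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;

class RestoreSketch {
    static void restoreLogs(ElasticsearchAsyncClient client) {
        client.snapshot().restore(r -> r
                .repository("my_repository")
                .snapshot("snapshot_2025_01_01")
                .indices("logs-*")
                .waitForCompletion(false)) // restore proceeds in the background
            .thenAccept(resp -> System.out.println(resp));
    }
}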

- * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @see Documentation @@ -617,21 +433,7 @@ public CompletableFuture status(SnapshotStatusRequest re } /** - * Get the snapshot status. Get a detailed description of the current state for - * each shard participating in the snapshot. Note that this API should be used - * only to obtain detailed shard-level information for ongoing snapshots. If - * this detail is not needed or you want to obtain information about one or more - * existing snapshots, use the get snapshot API. - *

- * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @param fn * a function that initializes a builder to create the @@ -647,21 +449,7 @@ public final CompletableFuture status( } /** - * Get the snapshot status. Get a detailed description of the current state for - * each shard participating in the snapshot. Note that this API should be used - * only to obtain detailed shard-level information for ongoing snapshots. If - * this detail is not needed or you want to obtain information about one or more - * existing snapshots, use the get snapshot API. - *

- * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @see Documentation @@ -676,8 +464,7 @@ public CompletableFuture status() { // ----- Endpoint: snapshot.verify_repository /** - * Verify a snapshot repository. Check for common misconfigurations in a - * snapshot repository. + * Verifies a repository. * * @see Documentation @@ -692,8 +479,7 @@ public CompletableFuture verifyRepository(VerifyReposi } /** - * Verify a snapshot repository. Check for common misconfigurations in a - * snapshot repository. + * Verifies a repository. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java index 2866e1630..8e0b2dee8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java @@ -68,9 +68,8 @@ public ElasticsearchSnapshotClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: snapshot.cleanup_repository /** - * Clean up the snapshot repository. Trigger the review of the contents of a - * snapshot repository and delete any stale data not referenced by existing - * snapshots. + * Triggers the review of a snapshot repository’s contents and deletes any stale + * data not referenced by existing snapshots. * * @see Documentation @@ -86,9 +85,8 @@ public CleanupRepositoryResponse cleanupRepository(CleanupRepositoryRequest requ } /** - * Clean up the snapshot repository. Trigger the review of the contents of a - * snapshot repository and delete any stale data not referenced by existing - * snapshots. + * Triggers the review of a snapshot repository’s contents and deletes any stale + * data not referenced by existing snapshots. * * @param fn * a function that initializes a builder to create the @@ -107,8 +105,8 @@ public final CleanupRepositoryResponse cleanupRepository( // ----- Endpoint: snapshot.clone /** - * Clone a snapshot. Clone part of all of a snapshot into another snapshot in - * the same repository. + * Clones indices from one snapshot into another snapshot in the same + * repository. * * @see Documentation @@ -123,8 +121,8 @@ public CloneSnapshotResponse clone(CloneSnapshotRequest request) throws IOExcept } /** - * Clone a snapshot. Clone part of all of a snapshot into another snapshot in - * the same repository. + * Clones indices from one snapshot into another snapshot in the same + * repository. * * @param fn * a function that initializes a builder to create the @@ -143,8 +141,7 @@ public final CloneSnapshotResponse clone( // ----- Endpoint: snapshot.create /** - * Create a snapshot. Take a snapshot of a cluster or of data streams and - * indices. + * Creates a snapshot in a repository. * * @see Documentation @@ -159,8 +156,7 @@ public CreateSnapshotResponse create(CreateSnapshotRequest request) throws IOExc } /** - * Create a snapshot. Take a snapshot of a cluster or of data streams and - * indices. + * Creates a snapshot in a repository. 
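// Illustrative sketch, not part of the diff: requesting shard-level status for a single
// running snapshot, per the status(...) overloads in the hunks above. Scoping the call to
// one repository and snapshot avoids the expensive "all snapshots" variant that the
// removed Javadoc warns about. Builder property names are assumptions.
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import java.io.IOException;

class SnapshotStatusSketch {
    static void printStatus(ElasticsearchClient client) throws IOException {
        System.out.println(client.snapshot().status(s -> s
                .repository("my_repository")
                .snapshot("snapshot_2025_01_01")));
    }
}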
* * @param fn * a function that initializes a builder to create the @@ -179,13 +175,7 @@ public final CreateSnapshotResponse create( // ----- Endpoint: snapshot.create_repository /** - * Create or update a snapshot repository. IMPORTANT: If you are migrating - * searchable snapshots, the repository name must be identical in the source and - * destination clusters. To register a snapshot repository, the cluster's global - * metadata must be writeable. Ensure there are no cluster blocks (for example, - * cluster.blocks.read_only and - * clsuter.blocks.read_only_allow_delete settings) that prevent - * write access. + * Creates a repository. * * @see Documentation @@ -201,13 +191,7 @@ public CreateRepositoryResponse createRepository(CreateRepositoryRequest request } /** - * Create or update a snapshot repository. IMPORTANT: If you are migrating - * searchable snapshots, the repository name must be identical in the source and - * destination clusters. To register a snapshot repository, the cluster's global - * metadata must be writeable. Ensure there are no cluster blocks (for example, - * cluster.blocks.read_only and - * clsuter.blocks.read_only_allow_delete settings) that prevent - * write access. + * Creates a repository. * * @param fn * a function that initializes a builder to create the @@ -226,7 +210,7 @@ public final CreateRepositoryResponse createRepository( // ----- Endpoint: snapshot.delete /** - * Delete snapshots. + * Deletes one or more snapshots. * * @see Documentation @@ -241,7 +225,7 @@ public DeleteSnapshotResponse delete(DeleteSnapshotRequest request) throws IOExc } /** - * Delete snapshots. + * Deletes one or more snapshots. * * @param fn * a function that initializes a builder to create the @@ -260,10 +244,7 @@ public final DeleteSnapshotResponse delete( // ----- Endpoint: snapshot.delete_repository /** - * Delete snapshot repositories. When a repository is unregistered, - * Elasticsearch removes only the reference to the location where the repository - * is storing the snapshots. The snapshots themselves are left untouched and in - * place. + * Deletes a repository. * * @see Documentation @@ -279,10 +260,7 @@ public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest request } /** - * Delete snapshot repositories. When a repository is unregistered, - * Elasticsearch removes only the reference to the location where the repository - * is storing the snapshots. The snapshots themselves are left untouched and in - * place. + * Deletes a repository. * * @param fn * a function that initializes a builder to create the @@ -301,7 +279,7 @@ public final DeleteRepositoryResponse deleteRepository( // ----- Endpoint: snapshot.get /** - * Get snapshot information. + * Returns information about a snapshot. * * @see Documentation @@ -316,7 +294,7 @@ public GetSnapshotResponse get(GetSnapshotRequest request) throws IOException, E } /** - * Get snapshot information. + * Returns information about a snapshot. * * @param fn * a function that initializes a builder to create the @@ -334,7 +312,7 @@ public final GetSnapshotResponse get(FunctionDocumentation @@ -350,7 +328,7 @@ public GetRepositoryResponse getRepository(GetRepositoryRequest request) } /** - * Get snapshot repository information. + * Returns information about a repository. * * @param fn * a function that initializes a builder to create the @@ -367,7 +345,7 @@ public final GetRepositoryResponse getRepository( } /** - * Get snapshot repository information. + * Returns information about a repository. 
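// Illustrative sketch, not part of the diff: the blocking repository and snapshot lookups
// documented above. The no-argument getRepository() overload appears in this diff; the
// repository/snapshot builder properties on get(...) are assumptions.
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import java.io.IOException;

class RepositoryLookupSketch {
    static void listRepositoriesAndOneSnapshot(ElasticsearchClient client) throws IOException {
        System.out.println(client.snapshot().getRepository()); // all registered repositories
        System.out.println(client.snapshot().get(g -> g
                .repository("my_repository")
                .snapshot("snapshot_2025_01_01")));
    }
}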
* * @see Documentation @@ -382,56 +360,7 @@ public GetRepositoryResponse getRepository() throws IOException, ElasticsearchEx // ----- Endpoint: snapshot.repository_verify_integrity /** - * Verify the repository integrity. Verify the integrity of the contents of a - * snapshot repository. - *

- * This API enables you to perform a comprehensive check of the contents of a - * repository, looking for any anomalies in its data or metadata which might - * prevent you from restoring snapshots from the repository or which might cause - * future snapshot create or delete operations to fail. - *

- * If you suspect the integrity of the contents of one of your snapshot - * repositories, cease all write activity to this repository immediately, set - * its read_only option to true, and use this API to - * verify its integrity. Until you do so: - *

- *

- * If the API finds any problems with the integrity of the contents of your - * repository, Elasticsearch will not be able to repair the damage. The only way - * to bring the repository back into a fully working state after its contents - * have been damaged is by restoring its contents from a repository backup which - * was taken before the damage occurred. You must also identify what caused the - * damage and take action to prevent it from happening again. - *

- * If you cannot restore a repository backup, register a new repository and use - * this for all future snapshot operations. In some cases it may be possible to - * recover some of the contents of a damaged repository, either by restoring as - * many of its snapshots as needed and taking new snapshots of the restored - * data, or by using the reindex API to copy data from any searchable snapshots - * mounted from the damaged repository. - *

- * Avoid all operations which write to the repository while the verify - * repository integrity API is running. If something changes the repository - * contents while an integrity verification is running then Elasticsearch may - * incorrectly report having detected some anomalies in its contents due to the - * concurrent writes. It may also incorrectly fail to report some anomalies that - * the concurrent writes prevented it from detecting. - *

- * NOTE: This API is intended for exploratory use by humans. You should expect - * the request parameters and the response format to vary in future versions. - *

- * NOTE: This API may not work correctly in a mixed-version cluster. + * Verifies the integrity of the contents of a snapshot repository * * @see Documentation @@ -447,56 +376,7 @@ public RepositoryVerifyIntegrityResponse repositoryVerifyIntegrity(RepositoryVer } /** - * Verify the repository integrity. Verify the integrity of the contents of a - * snapshot repository. - *

- * This API enables you to perform a comprehensive check of the contents of a - * repository, looking for any anomalies in its data or metadata which might - * prevent you from restoring snapshots from the repository or which might cause - * future snapshot create or delete operations to fail. - *

- * If you suspect the integrity of the contents of one of your snapshot - * repositories, cease all write activity to this repository immediately, set - * its read_only option to true, and use this API to - * verify its integrity. Until you do so: - *

- *

- * If the API finds any problems with the integrity of the contents of your - * repository, Elasticsearch will not be able to repair the damage. The only way - * to bring the repository back into a fully working state after its contents - * have been damaged is by restoring its contents from a repository backup which - * was taken before the damage occurred. You must also identify what caused the - * damage and take action to prevent it from happening again. - *

- * If you cannot restore a repository backup, register a new repository and use - * this for all future snapshot operations. In some cases it may be possible to - * recover some of the contents of a damaged repository, either by restoring as - * many of its snapshots as needed and taking new snapshots of the restored - * data, or by using the reindex API to copy data from any searchable snapshots - * mounted from the damaged repository. - *

- * Avoid all operations which write to the repository while the verify - * repository integrity API is running. If something changes the repository - * contents while an integrity verification is running then Elasticsearch may - * incorrectly report having detected some anomalies in its contents due to the - * concurrent writes. It may also incorrectly fail to report some anomalies that - * the concurrent writes prevented it from detecting. - *

- * NOTE: This API is intended for exploratory use by humans. You should expect - * the request parameters and the response format to vary in future versions. - *

- * NOTE: This API may not work correctly in a mixed-version cluster. + * Verifies the integrity of the contents of a snapshot repository * * @param fn * a function that initializes a builder to create the @@ -515,32 +395,7 @@ public final RepositoryVerifyIntegrityResponse repositoryVerifyIntegrity( // ----- Endpoint: snapshot.restore /** - * Restore a snapshot. Restore a snapshot of a cluster or data streams and - * indices. - *

- * You can restore a snapshot only to a running cluster with an elected master - * node. The snapshot repository must be registered and available to the - * cluster. The snapshot and cluster versions must be compatible. - *

- * To restore a snapshot, the cluster's global metadata must be writable. Ensure - * there are't any cluster blocks that prevent writes. The restore operation - * ignores index blocks. - *

- * Before you restore a data stream, ensure the cluster contains a matching - * index template with data streams enabled. To check, use the index management - * feature in Kibana or the get index template API: - * - *

-	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
-	 * 
-	 * 
- *

- * If no such template exists, you can create one or restore a cluster state - * that contains one. Without a matching index template, a data stream can't - * roll over or create backing indices. - *

- * If your snapshot contains data from App Search or Workplace Search, you must - * restore the Enterprise Search encryption key before you restore the snapshot. + * Restores a snapshot. * * @see Documentation @@ -555,32 +410,7 @@ public RestoreResponse restore(RestoreRequest request) throws IOException, Elast } /** - * Restore a snapshot. Restore a snapshot of a cluster or data streams and - * indices. - *

- * You can restore a snapshot only to a running cluster with an elected master - * node. The snapshot repository must be registered and available to the - * cluster. The snapshot and cluster versions must be compatible. - *

- * To restore a snapshot, the cluster's global metadata must be writable. Ensure - * there are't any cluster blocks that prevent writes. The restore operation - * ignores index blocks. - *

- * Before you restore a data stream, ensure the cluster contains a matching - * index template with data streams enabled. To check, use the index management - * feature in Kibana or the get index template API: - * - *

-	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
-	 * 
-	 * 
- *

- * If no such template exists, you can create one or restore a cluster state - * that contains one. Without a matching index template, a data stream can't - * roll over or create backing indices. - *

- * If your snapshot contains data from App Search or Workplace Search, you must - * restore the Enterprise Search encryption key before you restore the snapshot. + * Restores a snapshot. * * @param fn * a function that initializes a builder to create the @@ -598,21 +428,7 @@ public final RestoreResponse restore(Function - * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @see Documentation @@ -627,21 +443,7 @@ public SnapshotStatusResponse status(SnapshotStatusRequest request) throws IOExc } /** - * Get the snapshot status. Get a detailed description of the current state for - * each shard participating in the snapshot. Note that this API should be used - * only to obtain detailed shard-level information for ongoing snapshots. If - * this detail is not needed or you want to obtain information about one or more - * existing snapshots, use the get snapshot API. - *

- * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @param fn * a function that initializes a builder to create the @@ -658,21 +460,7 @@ public final SnapshotStatusResponse status( } /** - * Get the snapshot status. Get a detailed description of the current state for - * each shard participating in the snapshot. Note that this API should be used - * only to obtain detailed shard-level information for ongoing snapshots. If - * this detail is not needed or you want to obtain information about one or more - * existing snapshots, use the get snapshot API. - *

- * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @see Documentation @@ -687,8 +475,7 @@ public SnapshotStatusResponse status() throws IOException, ElasticsearchExceptio // ----- Endpoint: snapshot.verify_repository /** - * Verify a snapshot repository. Check for common misconfigurations in a - * snapshot repository. + * Verifies a repository. * * @see Documentation @@ -704,8 +491,7 @@ public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest request } /** - * Verify a snapshot repository. Check for common misconfigurations in a - * snapshot repository. + * Verifies a repository. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java index 305661c71..55a08eeba 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java @@ -59,7 +59,7 @@ // typedef: snapshot.get_repository.Request /** - * Get snapshot repository information. + * Returns information about a repository. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java index b62955a32..90544d289 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java @@ -61,7 +61,7 @@ // typedef: snapshot.get.Request /** - * Get snapshot information. + * Returns information about a snapshot. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java index 632463a4e..a71c3ea66 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java @@ -59,56 +59,7 @@ // typedef: snapshot.repository_verify_integrity.Request /** - * Verify the repository integrity. Verify the integrity of the contents of a - * snapshot repository. - *

- * This API enables you to perform a comprehensive check of the contents of a - * repository, looking for any anomalies in its data or metadata which might - * prevent you from restoring snapshots from the repository or which might cause - * future snapshot create or delete operations to fail. - *

- * If you suspect the integrity of the contents of one of your snapshot - * repositories, cease all write activity to this repository immediately, set - * its read_only option to true, and use this API to - * verify its integrity. Until you do so: - *

- * • It may not be possible to restore some snapshots from this repository.
- * • Searchable snapshots may report errors when searched or may have unassigned shards.
- * • Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.
- * • Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.
- * • Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents.

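The verification itself is exposed through the snapshot client. A minimal sketch, assuming the generated method is named repositoryVerifyIntegrity and takes the repository name as name (both assumed from the client's usual naming conventions); the repository name is hypothetical and the repository is assumed to already be read-only, as advised above.

import java.io.IOException;

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.snapshot.RepositoryVerifyIntegrityResponse;

public class VerifyRepositoryIntegrityExample {

    // Runs an integrity verification of a repository that has already been made read-only.
    // Method and parameter names are assumed; check the generated client for the exact API.
    static RepositoryVerifyIntegrityResponse verify(ElasticsearchClient client) throws IOException {
        return client.snapshot().repositoryVerifyIntegrity(r -> r
                .name("my_repository")); // hypothetical repository name
    }
}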
- * If the API finds any problems with the integrity of the contents of your - * repository, Elasticsearch will not be able to repair the damage. The only way - * to bring the repository back into a fully working state after its contents - * have been damaged is by restoring its contents from a repository backup which - * was taken before the damage occurred. You must also identify what caused the - * damage and take action to prevent it from happening again. - *

- * If you cannot restore a repository backup, register a new repository and use - * this for all future snapshot operations. In some cases it may be possible to - * recover some of the contents of a damaged repository, either by restoring as - * many of its snapshots as needed and taking new snapshots of the restored - * data, or by using the reindex API to copy data from any searchable snapshots - * mounted from the damaged repository. - *

- * Avoid all operations which write to the repository while the verify - * repository integrity API is running. If something changes the repository - * contents while an integrity verification is running then Elasticsearch may - * incorrectly report having detected some anomalies in its contents due to the - * concurrent writes. It may also incorrectly fail to report some anomalies that - * the concurrent writes prevented it from detecting. - *

- * NOTE: This API is intended for exploratory use by humans. You should expect - * the request parameters and the response format to vary in future versions. - *

- * NOTE: This API may not work correctly in a mixed-version cluster. + * Verifies the integrity of the contents of a snapshot repository * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java index 509e7f8a9..1176d6dac 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java @@ -61,32 +61,7 @@ // typedef: snapshot.restore.Request /** - * Restore a snapshot. Restore a snapshot of a cluster or data streams and - * indices. - *

- * You can restore a snapshot only to a running cluster with an elected master - * node. The snapshot repository must be registered and available to the - * cluster. The snapshot and cluster versions must be compatible. - *

- * To restore a snapshot, the cluster's global metadata must be writable. Ensure - * there aren't any cluster blocks that prevent writes. The restore operation - * ignores index blocks. - *

- * Before you restore a data stream, ensure the cluster contains a matching - * index template with data streams enabled. To check, use the index management - * feature in Kibana or the get index template API: - * - *

- * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
- * 
- * 
- *

- * If no such template exists, you can create one or restore a cluster state - * that contains one. Without a matching index template, a data stream can't - * roll over or create backing indices. - *

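When building the request object directly, the same restore can be expressed with RestoreRequest.of. The rename options used here restore the indices under new names so they do not collide with existing ones; all names and patterns are illustrative, not taken from this change.

import co.elastic.clients.elasticsearch.snapshot.RestoreRequest;

public class RestoreRequestExample {

    // Builds a restore request that copies the snapshotted index to a "restored-*" name.
    static RestoreRequest renamedRestore() {
        return RestoreRequest.of(r -> r
                .repository("my_repository")       // hypothetical repository name
                .snapshot("snapshot_1")            // hypothetical snapshot name
                .indices("my-index")               // index to restore (illustrative)
                .renamePattern("(.+)")             // capture the original index name
                .renameReplacement("restored-$1")  // restore it under a new name
                .includeAliases(false)
                .waitForCompletion(true));
    }
}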
- * If your snapshot contains data from App Search or Workplace Search, you must - * restore the Enterprise Search encryption key before you restore the snapshot. + * Restores a snapshot. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java index 45641441c..c9da0634a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java @@ -59,21 +59,7 @@ // typedef: snapshot.status.Request /** - * Get the snapshot status. Get a detailed description of the current state for - * each shard participating in the snapshot. Note that this API should be used - * only to obtain detailed shard-level information for ongoing snapshots. If - * this detail is not needed or you want to obtain information about one or more - * existing snapshots, use the get snapshot API. - *

- * WARNING: Using the API to return the status of any snapshots other than - * currently running snapshots can be expensive. The API requires a read from - * the repository for each shard in each snapshot. For example, if you have 100 - * snapshots with 1,000 shards each, an API request that includes all snapshots - * will require 100,000 reads (100 snapshots x 1,000 shards). - *

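The same advice applies when constructing the request object directly. A minimal sketch that targets a single snapshot and skips missing ones rather than failing; the names are hypothetical, and ignoreUnavailable mirrors the ignore_unavailable query parameter.

import co.elastic.clients.elasticsearch.snapshot.SnapshotStatusRequest;

public class SnapshotStatusRequestExample {

    // Builds a status request scoped to one snapshot instead of the whole repository.
    static SnapshotStatusRequest singleSnapshotStatus() {
        return SnapshotStatusRequest.of(s -> s
                .repository("my_repository")  // hypothetical repository name
                .snapshot("snapshot_1")       // hypothetical snapshot name
                .ignoreUnavailable(true));    // skip missing snapshots instead of failing
    }
}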
- * Depending on the latency of your storage, such requests can take an extremely - * long time to return results. These requests can also tax machine resources - * and, when using cloud storage, incur high processing costs. + * Returns information about the status of a snapshot. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java index 2371c4f0f..fffc7bc18 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java @@ -56,8 +56,7 @@ // typedef: snapshot.verify_repository.Request /** - * Verify a snapshot repository. Check for common misconfigurations in a - * snapshot repository. + * Verifies a repository. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/Schedule.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/Schedule.java index 115bdfb4b..ed6ec0005 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/Schedule.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/Schedule.java @@ -76,8 +76,6 @@ public class Schedule implements TaggedUnion, TriggerVari */ public enum Kind implements JsonEnum { - Timezone("timezone"), - Cron("cron"), Daily("daily"), @@ -145,23 +143,6 @@ public static Schedule of(Function> fn) { return fn.apply(new Builder()).build(); } - /** - * Is this variant instance of kind {@code timezone}? - */ - public boolean isTimezone() { - return _kind == Kind.Timezone; - } - - /** - * Get the {@code timezone} variant value. - * - * @throws IllegalStateException - * if the current variant is not of the {@code timezone} kind. - */ - public String timezone() { - return TaggedUnionUtils.get(this, Kind.Timezone); - } - /** * Is this variant instance of kind {@code cron}? 
*/ @@ -292,10 +273,6 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { ((JsonpSerializable) _value).serialize(generator, mapper); } else { switch (_kind) { - case Timezone : - generator.write(((String) this._value)); - - break; case Cron : generator.write(((String) this._value)); @@ -347,12 +324,6 @@ public static class Builder extends WithJsonObjectBuilderBase implement protected Builder self() { return this; } - public ObjectBuilder timezone(String v) { - this._kind = Kind.Timezone; - this._value = v; - return this; - } - public ObjectBuilder cron(String v) { this._kind = Kind.Cron; this._value = v; @@ -416,7 +387,6 @@ public Schedule build() { protected static void setupScheduleDeserializer(ObjectDeserializer op) { - op.add(Builder::timezone, JsonpDeserializer.stringDeserializer(), "timezone"); op.add(Builder::cron, JsonpDeserializer.stringDeserializer(), "cron"); op.add(Builder::daily, DailySchedule._DESERIALIZER, "daily"); op.add(Builder::hourly, HourlySchedule._DESERIALIZER, "hourly"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ScheduleBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ScheduleBuilders.java index f69090877..6f70cced3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ScheduleBuilders.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/watcher/ScheduleBuilders.java @@ -41,9 +41,9 @@ /** * Builders for {@link Schedule} variants. *

- * Variants timezone, cron, monthly, - * weekly, yearly are not available here as they don't - * have a dedicated class. Use {@link Schedule}'s builder for these. + * Variants cron, monthly, weekly, + * yearly are not available here as they don't have a dedicated + * class. Use {@link Schedule}'s builder for these. * */ public class ScheduleBuilders {
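With the timezone variant removed from Schedule in this change, watch schedules are built from the remaining variants. The cron builder method kept by the diff above can be used as follows; the cron expression is illustrative.

import co.elastic.clients.elasticsearch.watcher.Schedule;

public class ScheduleExample {

    // Builds a cron-based schedule using one of Schedule's remaining variants.
    static Schedule everyFiveMinutes() {
        return Schedule.of(s -> s.cron("0 0/5 * * * ?")); // illustrative cron expression
    }
}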