@lap v0.3
# Machine-readable API spec. Each @endpoint block is one API call.
@api Elasticsearch Request & Response Specification
@auth ApiKey ids in path
@endpoints 824
@hint download_for_search
@toc _async_search(4), {index}(123), _bulk(2), _cat(47), _ccr(7), _search(10), _pit(1), _cluster(15), _component_template(6), _info(1), _remote(1), _connector(30), _count(2), _dangling(3), _delete_by_query(1), _scripts(8), _enrich(6), _eql(3), _query(7), _features(2), _field_caps(2), _fleet(2), _script_context(1), _script_language(1), _health_report(2), _ilm(9), _analyze(2), _migration(6), _cache(1), _data_stream(19), _create_from(2), _index_template(9), _template(6), _alias(3), _flush(2), _forcemerge(1), _lifecycle(1), _mapping(2), _settings(3), _recovery(1), _refresh(2), _resolve(4), {alias}(2), _segments(1), _shard_stores(1), _stats(2), _aliases(1), _validate(2), _inference(45), root(2), _ingest(22), _license(8), _logstash(4), _mget(2), _ml(107), _msearch(4), _mtermvectors(2), _nodes(20), _query_rules(8), _rank_eval(2), _reindex(2), _render(4), _rollup(8), _application(12), _search_shards(2), _searchable_snapshots(4), _snapshot(19), _security(95), _slm(10), _sql(8), _ssl(1), _streams(3), _synonyms(7), _tasks(4), _text_structure(6), _transform(17), _update_by_query(1), _watcher(24), _xpack(2)

@group _async_search
@endpoint GET /_async_search/{id}
@desc Get async search results
@required {id: str # A unique identifier for the async search.}
@optional {keep_alive: any # The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted., typed_keys: bool # Specify whether aggregation and suggester names should be prefixed by their respective types in the response, wait_for_completion_timeout: any # Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait., return_intermediate_results: bool # Specifies whether the response should contain intermediate results if the query is still running when the wait_for_completion_timeout expires or if no wait_for_completion_timeout is specified. If true and the search is still running, the search response will include any hits and partial aggregations that are available. If false and the search is still running, the search response will not include any hits (but possibly include total hits) nor will include any partial aggregations. When not specified, the intermediate results are returned for running queries.}
@returns(200)

@endpoint DELETE /_async_search/{id}
@desc Delete an async search
@required {id: str # A unique identifier for the async search.}
@returns(200) {acknowledged: bool}

@endpoint GET /_async_search/status/{id}
@desc Get the async search status
@required {id: str # A unique identifier for the async search.}
@optional {keep_alive: any # The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period.}
@returns(200)

@endpoint POST /_async_search
@desc Run an async search
@optional {wait_for_completion_timeout: any # Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster., keep_alive: any # Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period., keep_on_completion: bool # If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # Indicate if an error should be returned if there is a partial search failure or timeout, analyzer: str # The analyzer to use for the query string, analyze_wildcard: bool # Specify whether wildcard and prefix queries should be analyzed, batched_reduce_size: num # Affects how often partial results become available, which happens whenever shard results are reduced. 
A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default)., ccs_minimize_roundtrips: bool # The default value is the only supported value., default_operator: str # The default operator for query string query (AND or OR), df: str # The field to use as default where no field prefix is given in the query string, docvalue_fields: any # A comma-separated list of fields to return as the docvalue representation of a field for each hit, expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, explain: bool # Specify whether to return detailed information about score computation as part of a hit, ignore_throttled: bool # Whether specified concrete, expanded or aliased indices should be ignored when throttled, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # Specify whether format-based query failures (such as providing text to a numeric field) should be ignored, max_concurrent_shard_requests: num # The number of concurrent shard requests per node this search executes concurrently. 
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests, preference: str # Specify the node or shard the operation should be performed on, request_cache: bool # Specify if request cache should be used for this request or not, defaults to true, routing: any # A comma-separated list of specific routing values, search_type: str # Search operation type, stats: [str] # Specific 'tag' of the request for logging and statistical purposes, stored_fields: any # A comma-separated list of stored fields to return as part of a hit, suggest_field: str # Specifies which field to use for suggestions., suggest_mode: str # Specify suggest mode, suggest_size: num # How many suggestions to return in response, suggest_text: str # The source text for which the suggestions should be returned., terminate_after: num # The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early, timeout: any # Explicit operation timeout, track_total_hits: any # Indicate if the number of documents that match the query should be tracked. 
A number can also be specified, to accurately track the total hit count up to the number., track_scores: bool # Whether to calculate and return scores even if they are not used for sorting, typed_keys: bool # Specify whether aggregation and suggester names should be prefixed by their respective types in the response, rest_total_hits_as_int: bool # Indicates whether hits.total should be rendered as an integer or an object in the rest search response, version: bool # Specify whether to return document version as part of a hit, _source: any # True or false to return the _source field or not, or a list of fields to return, _source_excludes: any # A list of fields to exclude from the returned _source field, _source_includes: any # A list of fields to extract and return from the _source field, seq_no_primary_term: bool # Specify whether to return sequence number and primary term of the last modification of each hit, q: str # Query in the Lucene query string syntax, size: num # Number of hits to return, from: num # Starting offset, sort: any # A comma-separated list of <field>:<direction> pairs, aggregations: map, collapse: any, explain: bool=false # If true, returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., highlight: any, track_total_hits: any # Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits., indices_boost: [map] # Boosts the _score of documents from specified indices., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. 
The request returns doc values for field names matching these patterns in the hits.fields property of the response., knn: any # Defines the approximate kNN search to run., min_score: num # Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations., post_filter: any, profile: bool, query: any # Defines the search definition using the Query DSL., rescore: any, script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any, size: num=10 # The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., slice: any, sort: any, _source: any # Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response., fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response., suggest: any, terminate_after: num=0 # Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early., timeout: str # Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout., track_scores: bool=false # If true, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If true, returns document version as part of a hit., seq_no_primary_term: bool # If true, returns sequence number and primary term of the last modification of each hit. 
See Optimistic concurrency control., stored_fields: any # List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response., pit: any # Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an <index> in the request path., runtime_mappings: any # Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200)
@example_request "{\n  \"sort\": [\n    { \"date\": { \"order\": \"asc\" } }\n  ],\n  \"aggs\": {\n    \"sale_date\": {\n      \"date_histogram\": {\n        \"field\": \"date\",\n        \"calendar_interval\": \"1d\"\n      }\n    }\n  }\n}"

@endgroup

@group {index}
@endpoint POST /{index}/_async_search
@desc Run an async search
@required {index: any # A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices}
@optional {wait_for_completion_timeout: any # Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster., keep_alive: any # Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period., keep_on_completion: bool # If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # Indicate if an error should be returned if there is a partial search failure or timeout, analyzer: str # The analyzer to use for the query string, analyze_wildcard: bool # Specify whether wildcard and prefix queries should be analyzed, batched_reduce_size: num # Affects how often partial results become available, which happens whenever shard results are reduced. 
A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default)., ccs_minimize_roundtrips: bool # The default value is the only supported value., default_operator: str # The default operator for query string query (AND or OR), df: str # The field to use as default where no field prefix is given in the query string, docvalue_fields: any # A comma-separated list of fields to return as the docvalue representation of a field for each hit, expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, explain: bool # Specify whether to return detailed information about score computation as part of a hit, ignore_throttled: bool # Whether specified concrete, expanded or aliased indices should be ignored when throttled, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # Specify whether format-based query failures (such as providing text to a numeric field) should be ignored, max_concurrent_shard_requests: num # The number of concurrent shard requests per node this search executes concurrently. 
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests, preference: str # Specify the node or shard the operation should be performed on, request_cache: bool # Specify if request cache should be used for this request or not, defaults to true, routing: any # A comma-separated list of specific routing values, search_type: str # Search operation type, stats: [str] # Specific 'tag' of the request for logging and statistical purposes, stored_fields: any # A comma-separated list of stored fields to return as part of a hit, suggest_field: str # Specifies which field to use for suggestions., suggest_mode: str # Specify suggest mode, suggest_size: num # How many suggestions to return in response, suggest_text: str # The source text for which the suggestions should be returned., terminate_after: num # The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early, timeout: any # Explicit operation timeout, track_total_hits: any # Indicate if the number of documents that match the query should be tracked. 
A number can also be specified, to accurately track the total hit count up to the number., track_scores: bool # Whether to calculate and return scores even if they are not used for sorting, typed_keys: bool # Specify whether aggregation and suggester names should be prefixed by their respective types in the response, rest_total_hits_as_int: bool # Indicates whether hits.total should be rendered as an integer or an object in the rest search response, version: bool # Specify whether to return document version as part of a hit, _source: any # True or false to return the _source field or not, or a list of fields to return, _source_excludes: any # A list of fields to exclude from the returned _source field, _source_includes: any # A list of fields to extract and return from the _source field, seq_no_primary_term: bool # Specify whether to return sequence number and primary term of the last modification of each hit, q: str # Query in the Lucene query string syntax, size: num # Number of hits to return, from: num # Starting offset, sort: any # A comma-separated list of <field>:<direction> pairs, aggregations: map, collapse: any, explain: bool=false # If true, returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., highlight: any, track_total_hits: any # Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits., indices_boost: [map] # Boosts the _score of documents from specified indices., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. 
The request returns doc values for field names matching these patterns in the hits.fields property of the response., knn: any # Defines the approximate kNN search to run., min_score: num # Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations., post_filter: any, profile: bool, query: any # Defines the search definition using the Query DSL., rescore: any, script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any, size: num=10 # The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., slice: any, sort: any, _source: any # Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response., fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response., suggest: any, terminate_after: num=0 # Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early., timeout: str # Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout., track_scores: bool=false # If true, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If true, returns document version as part of a hit., seq_no_primary_term: bool # If true, returns sequence number and primary term of the last modification of each hit. 
See Optimistic concurrency control., stored_fields: any # List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response., pit: any # Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an <index> in the request path., runtime_mappings: any # Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200)
@example_request "{\n  \"sort\": [\n    { \"date\": { \"order\": \"asc\" } }\n  ],\n  \"aggs\": {\n    \"sale_date\": {\n      \"date_histogram\": {\n        \"field\": \"date\",\n        \"calendar_interval\": \"1d\"\n      }\n    }\n  }\n}"

@endgroup

@group _bulk
@endpoint PUT /_bulk
@desc Bulk index or delete documents
@optional {include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., list_executed_pipelines: bool # If `true`, the response will include the ingest pipelines that were run for each index or create., pipeline: str # The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`., routing: any # A custom value that is used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., timeout: any # The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. 
The actual wait time could be longer, particularly when multiple waits occur., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active., require_alias: bool # If `true`, the request's actions must target an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {errors: bool, items: [map], took: num, ingest_took: num}
@example_request "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }"

@endpoint POST /_bulk
@desc Bulk index or delete documents
@optional {include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., list_executed_pipelines: bool # If `true`, the response will include the ingest pipelines that were run for each index or create., pipeline: str # The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`., routing: any # A custom value that is used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., timeout: any # The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. 
The actual wait time could be longer, particularly when multiple waits occur., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active., require_alias: bool # If `true`, the request's actions must target an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {errors: bool, items: [map], took: num, ingest_took: num}
@example_request "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }"

@endgroup

@group {index}
@endpoint PUT /{index}/_bulk
@desc Bulk index or delete documents
@required {index: str # The name of the data stream, index, or index alias to perform bulk actions on.}
@optional {include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., list_executed_pipelines: bool # If `true`, the response will include the ingest pipelines that were run for each index or create., pipeline: str # The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`., routing: any # A custom value that is used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., timeout: any # The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. 
The actual wait time could be longer, particularly when multiple waits occur., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active., require_alias: bool # If `true`, the request's actions must target an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {errors: bool, items: [map], took: num, ingest_took: num}
@example_request "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }"

@endpoint POST /{index}/_bulk
@desc Bulk index or delete documents
@required {index: str # The name of the data stream, index, or index alias to perform bulk actions on.}
@optional {include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., list_executed_pipelines: bool # If `true`, the response will include the ingest pipelines that were run for each index or create., pipeline: str # The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`., routing: any # A custom value that is used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., timeout: any # The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. 
The actual wait time could be longer, particularly when multiple waits occur., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active., require_alias: bool # If `true`, the request's actions must target an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {errors: bool, items: [map], took: num, ingest_took: num}
@example_request "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n{ \"delete\" : { \"_index\" : \"test\", \"_id\" : \"2\" } }\n{ \"create\" : { \"_index\" : \"test\", \"_id\" : \"3\" } }\n{ \"field1\" : \"value3\" }\n{ \"update\" : {\"_id\" : \"1\", \"_index\" : \"test\"} }\n{ \"doc\" : {\"field2\" : \"value2\"} }"

@endgroup

@group _cat
@endpoint GET /_cat/aliases
@desc Get aliases
@optional {h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never time out, you can set it to `-1`.}
@returns(200)

@endpoint GET /_cat/aliases/{name}
@desc Get aliases
@required {name: any # A comma-separated list of aliases to retrieve. Supports wildcards (`*`).  To retrieve all aliases, omit this parameter or use `*` or `_all`.}
@optional {h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never time out, you can set it to `-1`.}
@returns(200)

@endpoint GET /_cat/allocation
@desc Get shard allocation information
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/allocation/{node_id}
@desc Get shard allocation information
@required {node_id: any # A comma-separated list of node identifiers or names used to limit the returned information.}
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/circuit_breaker
@desc Get circuit breakers statistics
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/circuit_breaker/{circuit_breaker_patterns}
@desc Get circuit breakers statistics
@required {circuit_breaker_patterns: any # A comma-separated list of regular expressions to filter the circuit breakers in the output.}
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/component_templates
@desc Get component templates
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/component_templates/{name}
@desc Get component templates
@required {name: str # The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned.}
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/count
@desc Get a document count
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint POST /_cat/count
@desc Get a document count
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/count/{index}
@desc Get a document count
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint POST /_cat/count/{index}
@desc Get a document count
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/fielddata
@desc Get field data cache information
@optional {fields: any # Comma-separated list of fields used to limit returned information., h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/fielddata/{fields}
@desc Get field data cache information
@required {fields: any # Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter.}
@optional {fields: any # Comma-separated list of fields used to limit returned information., h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/health
@desc Get the cluster health status
@optional {ts: bool # If true, returns `HH:MM:SS` and Unix epoch timestamps., h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat
@desc Get CAT help
@returns(200)

@endpoint GET /_cat/indices
@desc Get index information
@optional {expand_wildcards: any # The type of index that wildcard patterns can match., health: str # The health status used to limit returned indices. By default, the response includes indices of any health status., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., pri: bool # If true, the response only includes information from primary shards., master_timeout: any # Period to wait for a connection to the master node., h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/indices/{index}
@desc Get index information
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {expand_wildcards: any # The type of index that wildcard patterns can match., health: str # The health status used to limit returned indices. By default, the response includes indices of any health status., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., pri: bool # If true, the response only includes information from primary shards., master_timeout: any # Period to wait for a connection to the master node., h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/master
@desc Get master node information
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/ml/data_frame/analytics
@desc Get data frame analytics jobs
@optional {allow_no_match: bool # Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified.), h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response.}
@returns(200)

@endpoint GET /_cat/ml/data_frame/analytics/{id}
@desc Get data frame analytics jobs
@required {id: str # The ID of the data frame analytics to fetch}
@optional {allow_no_match: bool # Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified.), h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response.}
@returns(200)

@endpoint GET /_cat/ml/datafeeds
@desc Get datafeeds
@optional {allow_no_match: bool # Specifies what to do when the request:  * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response.}
@returns(200)

@endpoint GET /_cat/ml/datafeeds/{datafeed_id}
@desc Get datafeeds
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed.}
@optional {allow_no_match: bool # Specifies what to do when the request:  * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response.}
@returns(200)

@endpoint GET /_cat/ml/anomaly_detectors
@desc Get anomaly detection jobs
@optional {allow_no_match: bool # Specifies what to do when the request:  * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response.}
@returns(200)

@endpoint GET /_cat/ml/anomaly_detectors/{job_id}
@desc Get anomaly detection jobs
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {allow_no_match: bool # Specifies what to do when the request:  * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response.}
@returns(200)

@endpoint GET /_cat/ml/trained_models
@desc Get trained models
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., h: any # A comma-separated list of column names to display., s: any # A comma-separated list of column names or aliases used to sort the response., from: num # Skips the specified number of trained models., size: num # The maximum number of trained models to display.}
@returns(200)

@endpoint GET /_cat/ml/trained_models/{model_id}
@desc Get trained models
@required {model_id: str # A unique identifier for the trained model.}
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., h: any # A comma-separated list of column names to display., s: any # A comma-separated list of column names or aliases used to sort the response., from: num # Skips the specified number of trained models., size: num # The maximum number of trained models to display.}
@returns(200)

@endpoint GET /_cat/nodeattrs
@desc Get node attribute information
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/nodes
@desc Get node information
@optional {full_id: bool # If `true`, return the full node ID. If `false`, return the shortened node ID., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/pending_tasks
@desc Get pending task information
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/plugins
@desc Get plugin information
@optional {h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., include_bootstrap: bool # Include bootstrap plugins in the response, local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/recovery
@desc Get shard recovery information
@optional {active_only: bool # If `true`, the response only includes ongoing shard recoveries., detailed: bool # If `true`, the response includes detailed information about shard recoveries., index: any # Comma-separated list or wildcard expression of index names to limit the returned information, h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/recovery/{index}
@desc Get shard recovery information
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {active_only: bool # If `true`, the response only includes ongoing shard recoveries., detailed: bool # If `true`, the response includes detailed information about shard recoveries., index: any # Comma-separated list or wildcard expression of index names to limit the returned information, h: any # A comma-separated list of columns names to display. It supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.}
@returns(200)

@endpoint GET /_cat/repositories
@desc Get snapshot repository information
@optional {h: any # List of columns to appear in the response. Supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/segments
@desc Get segment information
@optional {h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., allow_closed: bool # If true, allow closed indices to be returned in the response; otherwise, if false, keep the legacy behaviour of throwing an exception if the index pattern matches closed indices.}
@returns(200)

@endpoint GET /_cat/segments/{index}
@desc Get segment information
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., allow_closed: bool # If true, allow closed indices to be returned in the response; otherwise, if false, keep the legacy behaviour of throwing an exception if the index pattern matches closed indices.}
@returns(200)

@endpoint GET /_cat/shards
@desc Get shard information
@optional {h: any # List of columns to appear in the response. Supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/shards/{index}
@desc Get shard information
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {h: any # List of columns to appear in the response. Supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/snapshots
@desc Get snapshot information
@optional {ignore_unavailable: bool # If `true`, the response does not include information from unavailable snapshots., h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/snapshots/{repository}
@desc Get snapshot information
@required {repository: any # A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error.}
@optional {ignore_unavailable: bool # If `true`, the response does not include information from unavailable snapshots., h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/tasks
@desc Get task information
@optional {actions: [str] # The task action names, which are used to limit the response., detailed: bool # If `true`, the response includes detailed information about the running tasks., nodes: [str] # Unique node identifiers, which are used to limit the response., parent_task_id: str # The parent task identifier, which is used to limit the response., h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_completion: bool # If `true`, the request blocks until the task has completed.}
@returns(200)

@endpoint GET /_cat/templates
@desc Get index template information
@optional {h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/templates/{name}
@desc Get index template information
@required {name: str # The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned.}
@optional {h: any # A comma-separated list of column names to display. It supports simple wildcards., s: any # List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/thread_pool
@desc Get thread pool statistics
@optional {h: any # List of columns to appear in the response. Supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/thread_pool/{thread_pool_patterns}
@desc Get thread pool statistics
@required {thread_pool_patterns: any # A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions.}
@optional {h: any # List of columns to appear in the response. Supports simple wildcards., s: any # A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name., local: bool # If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node., master_timeout: any # The period to wait for a connection to the master node.}
@returns(200)

@endpoint GET /_cat/transforms
@desc Get transform information
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of transforms., h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response., size: num # The maximum number of transforms to obtain.}
@returns(200)

@endpoint GET /_cat/transforms/{transform_id}
@desc Get transform information
@required {transform_id: str # A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms.}
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of transforms., h: any # Comma-separated list of column names to display., s: any # Comma-separated list of column names or column aliases used to sort the response., size: num # The maximum number of transforms to obtain.}
@returns(200)

@endgroup

@group _ccr
@endpoint GET /_ccr/auto_follow/{name}
@desc Get auto-follow patterns
@required {name: str # The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {patterns: [map]}

@endpoint PUT /_ccr/auto_follow/{name}
@desc Create or update auto-follow patterns
@required {name: str # The name of the collection of auto-follow patterns., remote_cluster: str # The remote cluster containing the leader indices to match against.}
@optional {master_timeout: any # Period to wait for a connection to the master node., follow_index_pattern: any # The name of follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices., leader_index_patterns: any # An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field., leader_index_exclusion_patterns: any # An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names are matching one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed., max_outstanding_read_requests: num=12 # The maximum number of outstanding read requests from the remote cluster., settings: map # Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards)., max_outstanding_write_requests: num=9 # The maximum number of outstanding write requests on the follower., read_poll_timeout: any=1m # The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again., max_read_request_operation_count: num=5120 # The maximum number of operations to pull per read from the remote cluster., max_read_request_size: any=32mb # The maximum size in bytes of per read of a batch of operations pulled from the remote cluster., max_retry_delay: any=500ms # The maximum time to wait before retrying an operation that failed exceptionally. 
An exponential backoff strategy is employed when retrying., max_write_buffer_count: num=2147483647 # The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit., max_write_buffer_size: any=512mb # The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit., max_write_request_operation_count: num=5120 # The maximum number of operations per bulk write request executed on the follower., max_write_request_size: any=9223372036854775807b # The maximum total bytes of operations per bulk write request executed on the follower.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"remote_cluster\" : \"remote_cluster\",\n  \"leader_index_patterns\" :\n  [\n    \"leader_index*\"\n  ],\n  \"follow_index_pattern\" : \"{{leader_index}}-follower\",\n  \"settings\": {\n    \"index.number_of_replicas\": 0\n  },\n  \"max_read_request_operation_count\" : 1024,\n  \"max_outstanding_read_requests\" : 16,\n  \"max_read_request_size\" : \"1024k\",\n  \"max_write_request_operation_count\" : 32768,\n  \"max_write_request_size\" : \"16k\",\n  \"max_outstanding_write_requests\" : 8,\n  \"max_write_buffer_count\" : 512,\n  \"max_write_buffer_size\" : \"512k\",\n  \"max_retry_delay\" : \"10s\",\n  \"read_poll_timeout\" : \"30s\"\n}"

@endpoint DELETE /_ccr/auto_follow/{name}
@desc Delete auto-follow patterns
@required {name: str # The auto-follow pattern collection to delete.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}

@endgroup

@group {index}
@endpoint PUT /{index}/_ccr/follow
@desc Create a follower
@required {index: str # The name of the follower index., leader_index: any # The name of the index in the leader cluster to follow., remote_cluster: str # The remote cluster containing the leader index.}
@optional {master_timeout: any # Period to wait for a connection to the master node., wait_for_active_shards: any # Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index., data_stream_name: str # If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed., max_outstanding_read_requests: num # The maximum number of outstanding reads requests from the remote cluster., max_outstanding_write_requests: num # The maximum number of outstanding write requests on the follower., max_read_request_operation_count: num # The maximum number of operations to pull per read from the remote cluster., max_read_request_size: any # The maximum size in bytes of per read of a batch of operations pulled from the remote cluster., max_retry_delay: any # The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying., max_write_buffer_count: num # The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit., max_write_buffer_size: any # The maximum total bytes of operations that can be queued for writing. 
When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit., max_write_request_operation_count: num # The maximum number of operations per bulk write request executed on the follower., max_write_request_size: any # The maximum total bytes of operations per bulk write request executed on the follower., read_poll_timeout: any # The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again., settings: any # Settings to override from the leader index.}
@returns(200) {follow_index_created: bool, follow_index_shards_acked: bool, index_following_started: bool}
@example_request "{\n  \"remote_cluster\" : \"remote_cluster\",\n  \"leader_index\" : \"leader_index\",\n  \"settings\": {\n    \"index.number_of_replicas\": 0\n  },\n  \"max_read_request_operation_count\" : 1024,\n  \"max_outstanding_read_requests\" : 16,\n  \"max_read_request_size\" : \"1024k\",\n  \"max_write_request_operation_count\" : 32768,\n  \"max_write_request_size\" : \"16k\",\n  \"max_outstanding_write_requests\" : 8,\n  \"max_write_buffer_count\" : 512,\n  \"max_write_buffer_size\" : \"512k\",\n  \"max_retry_delay\" : \"10s\",\n  \"read_poll_timeout\" : \"30s\"\n}"

@endpoint GET /{index}/_ccr/info
@desc Get follower information
@required {index: any # A comma-delimited list of follower index patterns.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {follower_indices: [map]}

@endpoint GET /{index}/_ccr/stats
@desc Get follower stats
@required {index: any # A comma-delimited list of index patterns.}
@optional {timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {indices: [map]}

@endpoint POST /{index}/_ccr/forget_follower
@desc Forget a follower
@required {index: str # Name of the leader index for which specified follower retention leases should be removed}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., follower_cluster: str, follower_index: any, follower_index_uuid: any, leader_remote_cluster: str}
@returns(200) {_shards: any}
@example_request "{\n  \"follower_cluster\" : \"<follower_cluster>\",\n  \"follower_index\" : \"<follower_index>\",\n  \"follower_index_uuid\" : \"<follower_index_uuid>\",\n  \"leader_remote_cluster\" : \"<leader_remote_cluster>\"\n}"

@endgroup

@group _ccr
@endpoint GET /_ccr/auto_follow
@desc Get auto-follow patterns
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {patterns: [map]}

@endpoint POST /_ccr/auto_follow/{name}/pause
@desc Pause an auto-follow pattern
@required {name: str # The name of the auto-follow pattern to pause.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}

@endgroup

@group {index}
@endpoint POST /{index}/_ccr/pause_follow
@desc Pause a follower
@required {index: str # The name of the follower index.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}

@endgroup

@group _ccr
@endpoint POST /_ccr/auto_follow/{name}/resume
@desc Resume an auto-follow pattern
@required {name: str # The name of the auto-follow pattern to resume.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}

@endgroup

@group {index}
@endpoint POST /{index}/_ccr/resume_follow
@desc Resume a follower
@required {index: str # Name of the follow index to resume following}
@optional {master_timeout: any # Period to wait for a connection to the master node., max_outstanding_read_requests: num, max_outstanding_write_requests: num, max_read_request_operation_count: num, max_read_request_size: str, max_retry_delay: any, max_write_buffer_count: num, max_write_buffer_size: str, max_write_request_operation_count: num, max_write_request_size: str, read_poll_timeout: any}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"max_read_request_operation_count\" : 1024,\n  \"max_outstanding_read_requests\" : 16,\n  \"max_read_request_size\" : \"1024k\",\n  \"max_write_request_operation_count\" : 32768,\n  \"max_write_request_size\" : \"16k\",\n  \"max_outstanding_write_requests\" : 8,\n  \"max_write_buffer_count\" : 512,\n  \"max_write_buffer_size\" : \"512k\",\n  \"max_retry_delay\" : \"10s\",\n  \"read_poll_timeout\" : \"30s\"\n}"

@endgroup

@group _ccr
@endpoint GET /_ccr/stats
@desc Get cross-cluster replication stats
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {auto_follow_stats: any, follow_stats: any}

@endgroup

@group {index}
@endpoint POST /{index}/_ccr/unfollow
@desc Unfollow an index
@required {index: str # The name of the follower index.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}

@endgroup

@group _search
@endpoint GET /_search/scroll
@desc Run a scrolling search
@required {scroll_id: any # The scroll ID of the search.}
@optional {scroll: any=1d # The period to retain the search context for scrolling., scroll_id: str # The scroll ID of the search., rest_total_hits_as_int: bool # If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}"

@endpoint POST /_search/scroll
@desc Run a scrolling search
@required {scroll_id: any # The scroll ID of the search.}
@optional {scroll: any=1d # The period to retain the search context for scrolling., scroll_id: str # The scroll ID of the search., rest_total_hits_as_int: bool # If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}"

@endpoint DELETE /_search/scroll
@desc Clear a scrolling search
@optional {scroll_id: any # The scroll IDs to clear. To clear all scroll IDs, use `_all`.}
@returns(200) {succeeded: bool, num_freed: num}
@example_request "{\n  \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}"

@endpoint GET /_search/scroll/{scroll_id}
@desc Run a scrolling search
@required {scroll_id: str # The scroll ID of the search.}
@optional {scroll: any=1d # The period to retain the search context for scrolling., scroll_id: str # The scroll ID of the search., rest_total_hits_as_int: bool # If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}"

@endpoint POST /_search/scroll/{scroll_id}
@desc Run a scrolling search
@required {scroll_id: str # The scroll ID of the search.}
@optional {scroll: any=1d # The period to retain the search context for scrolling., scroll_id: str # The scroll ID of the search., rest_total_hits_as_int: bool # If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"scroll_id\" : \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}"

@endpoint DELETE /_search/scroll/{scroll_id}
@desc Clear a scrolling search
@required {scroll_id: any # A comma-separated list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter.}
@optional {scroll_id: any # The scroll IDs to clear. To clear all scroll IDs, use `_all`.}
@returns(200) {succeeded: bool, num_freed: num}
@example_request "{\n  \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==\"\n}"

@endgroup

@group _pit
@endpoint DELETE /_pit
@desc Close a point in time
@required {id: any # The ID of the point-in-time.}
@returns(200) {succeeded: bool, num_freed: num}
@example_request "{\n  \"id\": \"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==\"\n}"

@endgroup

@group _cluster
@endpoint GET /_cluster/allocation/explain
@desc Explain the shard allocations
@optional {index: str # The name of the index that you would like an explanation for., shard: num # An identifier for the shard that you would like an explanation for., primary: bool # If true, returns an explanation for the primary shard for the specified shard ID., current_node: str # Explain a shard only if it is currently located on the specified node name or node ID., include_disk_info: bool # If true, returns information about disk usage and shard sizes., include_yes_decisions: bool # If true, returns YES decisions in explanation., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {allocate_explanation: str, allocation_delay: any, allocation_delay_in_millis: any, can_allocate: any, can_move_to_other_node: any, can_rebalance_cluster: any, can_rebalance_cluster_decisions: [map], can_rebalance_to_other_node: any, can_remain_decisions: [map], can_remain_on_current_node: any, cluster_info: any, configured_delay: any, configured_delay_in_millis: any, current_node: any, current_state: str, index: any, move_explanation: str, node_allocation_decisions: [map], primary: bool, rebalance_explanation: str, remaining_delay: any, remaining_delay_in_millis: any, shard: num, unassigned_info: any, note: str}
@example_request "{\n  \"index\": \"my-index-000001\",\n  \"shard\": 0,\n  \"primary\": false,\n  \"current_node\": \"my-node\"\n}"

@endpoint POST /_cluster/allocation/explain
@desc Explain the shard allocations
@optional {index: str # The name of the index that you would like an explanation for., shard: num # An identifier for the shard that you would like an explanation for., primary: bool # If true, returns an explanation for the primary shard for the specified shard ID., current_node: str # Explain a shard only if it is currently located on the specified node name or node ID., include_disk_info: bool # If true, returns information about disk usage and shard sizes., include_yes_decisions: bool # If true, returns YES decisions in explanation., master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {allocate_explanation: str, allocation_delay: any, allocation_delay_in_millis: any, can_allocate: any, can_move_to_other_node: any, can_rebalance_cluster: any, can_rebalance_cluster_decisions: [map], can_rebalance_to_other_node: any, can_remain_decisions: [map], can_remain_on_current_node: any, cluster_info: any, configured_delay: any, configured_delay_in_millis: any, current_node: any, current_state: str, index: any, move_explanation: str, node_allocation_decisions: [map], primary: bool, rebalance_explanation: str, remaining_delay: any, remaining_delay_in_millis: any, shard: num, unassigned_info: any, note: str}
@example_request "{\n  \"index\": \"my-index-000001\",\n  \"shard\": 0,\n  \"primary\": false,\n  \"current_node\": \"my-node\"\n}"

@endgroup

@group _component_template
@endpoint GET /_component_template/{name}
@desc Get component templates
@required {name: str # Name of component template to retrieve. Wildcard (`*`) expressions are supported.}
@optional {flat_settings: bool # If `true`, returns settings in flat format., settings_filter: any # Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys, include_defaults: bool # Return all default configurations for the component template, local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {component_templates: [map]}

@endpoint PUT /_component_template/{name}
@desc Create or update a component template
@required {name: str # Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API., template: any # The template to be applied which includes mappings, settings, or aliases configuration.}
@optional {create: bool # If `true`, this request cannot replace or update existing component templates., cause: str # User defined reason for create the component template., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., version: any # Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version., _meta: any # Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information., deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"template\": {\n    \"settings\": {\n      \"number_of_shards\": 1\n    },\n    \"mappings\": {\n      \"_source\": {\n        \"enabled\": false\n      },\n      \"properties\": {\n        \"host_name\": {\n          \"type\": \"keyword\"\n        },\n        \"created_at\": {\n          \"type\": \"date\",\n          \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n        }\n      }\n    }\n  }\n}"

@endpoint POST /_component_template/{name}
@desc Create or update a component template
@required {name: str # Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API., template: any # The template to be applied which includes mappings, settings, or aliases configuration.}
@optional {create: bool # If `true`, this request cannot replace or update existing component templates., cause: str # User-defined reason for creating the component template., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., version: any # Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version., _meta: any # Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information., deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"template\": {\n    \"settings\": {\n      \"number_of_shards\": 1\n    },\n    \"mappings\": {\n      \"_source\": {\n        \"enabled\": false\n      },\n      \"properties\": {\n        \"host_name\": {\n          \"type\": \"keyword\"\n        },\n        \"created_at\": {\n          \"type\": \"date\",\n          \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n        }\n      }\n    }\n  }\n}"

@endpoint DELETE /_component_template/{name}
@desc Delete component templates
@required {name: any # Comma-separated list or wildcard expression of component template names used to limit the request.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint HEAD /_component_template/{name}
@desc Check component templates
@required {name: any # Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., local: bool # If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.}
@returns(200)

@endgroup

@group _cluster
@endpoint POST /_cluster/voting_config_exclusions
@desc Update voting configuration exclusions
@optional {node_names: any # A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids., node_ids: any # A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names., master_timeout: any # Period to wait for a connection to the master node., timeout: any # When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error.}
@returns(200)

@endpoint DELETE /_cluster/voting_config_exclusions
@desc Clear cluster voting config exclusions
@optional {master_timeout: any # Period to wait for a connection to the master node., wait_for_removal: bool # Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster.}
@returns(200)

@endgroup

@group _component_template
@endpoint GET /_component_template
@desc Get component templates
@optional {flat_settings: bool # If `true`, returns settings in flat format., settings_filter: any # Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys, include_defaults: bool # Return all default configurations for the component template, local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {component_templates: [map]}

@endgroup

@group _cluster
@endpoint GET /_cluster/settings
@desc Get cluster-wide settings
@optional {flat_settings: bool # If `true`, returns settings in flat format., include_defaults: bool # If `true`, also returns the values of all other cluster settings set in the `elasticsearch.yml` file on one of the nodes in your cluster, together with the default values of all other cluster settings on that node. The default value of each setting may depend on the values of other settings on that node. If the nodes in your cluster do not all have the same configuration then the values returned by this API may vary from invocation to invocation and may not reflect the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to fetch the settings for each individual node in your cluster., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {persistent: map, transient: map, defaults: map}

@endpoint PUT /_cluster/settings
@desc Update the cluster settings
@optional {flat_settings: bool # Return settings in flat format, master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response., persistent: map # The settings that persist after the cluster restarts., transient: map # The settings that do not persist after the cluster restarts.}
@returns(200) {acknowledged: bool, persistent: map, transient: map}
@example_request "{\n  \"persistent\" : {\n    \"indices.recovery.max_bytes_per_sec\" : \"50mb\"\n  }\n}"

@endpoint GET /_cluster/health
@desc Get the cluster health status
@optional {expand_wildcards: any # Expand wildcard expression to concrete indices that are open, closed or both., level: str # Return health information at a specific level of detail., local: bool # If true, retrieve information from the local node only. If false, retrieve information from the master node., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # Wait for the specified number of active shards. Use `all` to wait for all shards in the cluster to be active. Use `0` to not wait., wait_for_events: str # Wait until all currently queued events with the given priority are processed., wait_for_nodes: any # Wait until the specified number (N) of nodes is available. It also accepts `>=N`, `<=N`, `>N`, and `<N`. Alternatively, use the notations `ge(N)`, `le(N)`, `gt(N)`, and `lt(N)`., wait_for_no_initializing_shards: bool # Wait (until the timeout expires) for the cluster to have no shard initializations. If false, the request does not wait for initializing shards., wait_for_no_relocating_shards: bool # Wait (until the timeout expires) for the cluster to have no shard relocations. If false, the request does not wait for relocating shards., wait_for_status: str # Wait (until the timeout expires) for the cluster to reach a specific health status (or a better status). A green status is better than yellow and yellow is better than red. By default, the request does not wait for a particular status.}
@returns(200) {active_primary_shards: num, active_shards: num, active_shards_percent: str, active_shards_percent_as_number: num, cluster_name: any, delayed_unassigned_shards: num, indices: map, initializing_shards: num, number_of_data_nodes: num, number_of_in_flight_fetch: num, number_of_nodes: num, number_of_pending_tasks: num, relocating_shards: num, status: any, task_max_waiting_in_queue: any, task_max_waiting_in_queue_millis: any, timed_out: bool, unassigned_primary_shards: num, unassigned_shards: num}

@endpoint GET /_cluster/health/{index}
@desc Get the cluster health status
@required {index: any # A comma-separated list of data streams, indices, and index aliases that limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.}
@optional {expand_wildcards: any # Expand wildcard expression to concrete indices that are open, closed or both., level: str # Return health information at a specific level of detail., local: bool # If true, retrieve information from the local node only. If false, retrieve information from the master node., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # Wait for the specified number of active shards. Use `all` to wait for all shards in the cluster to be active. Use `0` to not wait., wait_for_events: str # Wait until all currently queued events with the given priority are processed., wait_for_nodes: any # Wait until the specified number (N) of nodes is available. It also accepts `>=N`, `<=N`, `>N`, and `<N`. Alternatively, use the notations `ge(N)`, `le(N)`, `gt(N)`, and `lt(N)`., wait_for_no_initializing_shards: bool # Wait (until the timeout expires) for the cluster to have no shard initializations. If false, the request does not wait for initializing shards., wait_for_no_relocating_shards: bool # Wait (until the timeout expires) for the cluster to have no shard relocations. If false, the request does not wait for relocating shards., wait_for_status: str # Wait (until the timeout expires) for the cluster to reach a specific health status (or a better status). A green status is better than yellow and yellow is better than red. By default, the request does not wait for a particular status.}
@returns(200) {active_primary_shards: num, active_shards: num, active_shards_percent: str, active_shards_percent_as_number: num, cluster_name: any, delayed_unassigned_shards: num, indices: map, initializing_shards: num, number_of_data_nodes: num, number_of_in_flight_fetch: num, number_of_nodes: num, number_of_pending_tasks: num, relocating_shards: num, status: any, task_max_waiting_in_queue: any, task_max_waiting_in_queue_millis: any, timed_out: bool, unassigned_primary_shards: num, unassigned_shards: num}

@endgroup

@group _info
@endpoint GET /_info/{target}
@desc Get cluster info
@required {target: any # Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest.}
@returns(200) {cluster_name: any, http: any, ingest: any, thread_pool: map, script: any}

@endgroup

@group _cluster
@endpoint GET /_cluster/pending_tasks
@desc Get the pending cluster tasks
@optional {local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {tasks: [map]}

@endgroup

@group _remote
@endpoint GET /_remote/info
@desc Get remote cluster information
@returns(200)

@endgroup

@group _cluster
@endpoint POST /_cluster/reroute
@desc Reroute the cluster
@optional {dry_run: bool # If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes., explain: bool # If true, then the response contains an explanation of why the commands can or cannot run., metric: any # Limits the information returned to the specified metrics., retry_failed: bool # If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., commands: [map{cancel: any, move: any, allocate_replica: any, allocate_stale_primary: any, allocate_empty_primary: any}] # Defines the commands to perform.}
@returns(200) {acknowledged: bool, explanations: [map], state: map}
@example_request "{\n  \"commands\": [\n    {\n      \"move\": {\n        \"index\": \"test\", \"shard\": 0,\n        \"from_node\": \"node1\", \"to_node\": \"node2\"\n      }\n    },\n    {\n      \"allocate_replica\": {\n        \"index\": \"test\", \"shard\": 1,\n        \"node\": \"node3\"\n      }\n    }\n  ]\n}"

@endpoint GET /_cluster/state
@desc Get the cluster state
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, flat_settings: bool # Return settings in flat format, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # Return local information, do not retrieve the state from master node, master_timeout: any # Timeout for waiting for new cluster state in case it is blocked, wait_for_metadata_version: num # Wait for the metadata version to be equal or greater than the specified metadata version, wait_for_timeout: any # The maximum time to wait for wait_for_metadata_version before timing out}
@returns(200)

@endpoint GET /_cluster/state/{metric}
@desc Get the cluster state
@required {metric: any # Limit the information returned to the specified metrics.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, flat_settings: bool # Return settings in flat format, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # Return local information, do not retrieve the state from master node, master_timeout: any # Timeout for waiting for new cluster state in case it is blocked, wait_for_metadata_version: num # Wait for the metadata version to be equal or greater than the specified metadata version, wait_for_timeout: any # The maximum time to wait for wait_for_metadata_version before timing out}
@returns(200)

@endpoint GET /_cluster/state/{metric}/{index}
@desc Get the cluster state
@required {metric: any # Limit the information returned to the specified metrics., index: any # A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, flat_settings: bool # Return settings in flat format, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # Return local information, do not retrieve the state from master node, master_timeout: any # Timeout for waiting for new cluster state in case it is blocked, wait_for_metadata_version: num # Wait for the metadata version to be equal or greater than the specified metadata version, wait_for_timeout: any # The maximum time to wait for wait_for_metadata_version before timing out}
@returns(200)

@endpoint GET /_cluster/stats
@desc Get cluster statistics
@optional {include_remotes: bool # Include remote cluster data into the response, timeout: any # Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.}
@returns(200)

@endpoint GET /_cluster/stats/nodes/{node_id}
@desc Get cluster statistics
@required {node_id: any # Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster.}
@optional {include_remotes: bool # Include remote cluster data into the response, timeout: any # Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.}
@returns(200)

@endgroup

@group _connector
@endpoint PUT /_connector/{connector_id}/_check_in
@desc Check in a connector
@required {connector_id: str # The unique identifier of the connector to be checked in}
@returns(200) {result: any}

@endpoint GET /_connector/{connector_id}
@desc Get a connector
@required {connector_id: str # The unique identifier of the connector}
@optional {include_deleted: bool # A flag to indicate if the desired connector should be fetched, even if it was soft-deleted.}
@returns(200) {api_key_id: str, api_key_secret_id: str, configuration: any, custom_scheduling: any, deleted: bool, description: str, error: any, features: any, filtering: [map], id: any, index_name: any, is_native: bool, language: str, last_access_control_sync_error: str, last_access_control_sync_scheduled_at: any, last_access_control_sync_status: any, last_deleted_document_count: num, last_incremental_sync_scheduled_at: any, last_indexed_document_count: num, last_seen: any, last_sync_error: str, last_sync_scheduled_at: any, last_sync_status: any, last_synced: any, name: str, pipeline: any, scheduling: any, service_type: str, status: any, sync_cursor: map, sync_now: bool}

@endpoint PUT /_connector/{connector_id}
@desc Create or update a connector
@required {connector_id: str # The unique identifier of the connector to be created or updated. ID is auto-generated if not provided.}
@optional {description: str, index_name: any, is_native: bool, language: str, name: str, service_type: str}
@returns(200) {result: any, id: any}
@example_request "{\n  \"index_name\": \"search-google-drive\",\n  \"name\": \"My Connector\",\n  \"service_type\": \"google_drive\"\n}"

@endpoint DELETE /_connector/{connector_id}
@desc Delete a connector
@required {connector_id: str # The unique identifier of the connector to be deleted}
@optional {delete_sync_jobs: bool # A flag indicating if associated sync jobs should be also removed., hard: bool # A flag indicating if the connector should be hard deleted.}
@returns(200) {acknowledged: bool}

@endpoint GET /_connector
@desc Get all connectors
@optional {from: num # Starting offset, size: num # Specifies a max number of results to get, index_name: any # A comma-separated list of connector index names to fetch connector documents for, connector_name: any # A comma-separated list of connector names to fetch connector documents for, service_type: any # A comma-separated list of connector service types to fetch connector documents for, include_deleted: bool # A flag to indicate if the desired connector should be fetched, even if it was soft-deleted., query: str # A wildcard query string that filters connectors with matching name, description or index name}
@returns(200) {count: num, results: [map]}

@endpoint PUT /_connector
@desc Create or update a connector
@optional {description: str, index_name: any, is_native: bool, language: str, name: str, service_type: str}
@returns(200) {result: any, id: any}
@example_request "{\n  \"index_name\": \"search-google-drive\",\n  \"name\": \"My Connector\",\n  \"service_type\": \"google_drive\"\n}"

@endpoint POST /_connector
@desc Create a connector
@optional {description: str, index_name: any, is_native: bool, language: str, name: str, service_type: str}
@returns(200) {result: any, id: any}

@endpoint PUT /_connector/_sync_job/{connector_sync_job_id}/_cancel
@desc Cancel a connector sync job
@required {connector_sync_job_id: str # The unique identifier of the connector sync job}
@returns(200) {result: any}

@endpoint PUT /_connector/_sync_job/{connector_sync_job_id}/_check_in
@desc Check in a connector sync job
@required {connector_sync_job_id: str # The unique identifier of the connector sync job to be checked in.}
@returns(200)

@endpoint PUT /_connector/_sync_job/{connector_sync_job_id}/_claim
@desc Claim a connector sync job
@required {connector_sync_job_id: str # The unique identifier of the connector sync job., worker_hostname: str # The host name of the current system that will run the job.}
@optional {sync_cursor: map # The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs.}
@returns(200)
@example_request "{\n  \"worker_hostname\": \"some-machine\"\n}"

@endpoint GET /_connector/_sync_job/{connector_sync_job_id}
@desc Get a connector sync job
@required {connector_sync_job_id: str # The unique identifier of the connector sync job}
@returns(200) {cancelation_requested_at: any, canceled_at: any, completed_at: any, connector: any, created_at: any, deleted_document_count: num, error: str, id: any, indexed_document_count: num, indexed_document_volume: num, job_type: any, last_seen: any, metadata: map, started_at: any, status: any, total_document_count: num, trigger_method: any, worker_hostname: str}

@endpoint DELETE /_connector/_sync_job/{connector_sync_job_id}
@desc Delete a connector sync job
@required {connector_sync_job_id: str # The unique identifier of the connector sync job to be deleted}
@returns(200) {acknowledged: bool}

@endpoint PUT /_connector/_sync_job/{connector_sync_job_id}/_error
@desc Set a connector sync job error
@required {connector_sync_job_id: str # The unique identifier for the connector sync job., error: str # The error for the connector sync job error field.}
@returns(200)
@example_request "{\n    \"error\": \"some-error\"\n}"

@endpoint GET /_connector/_sync_job
@desc Get all connector sync jobs
@optional {from: num # Starting offset, size: num # Specifies a max number of results to get, status: str # A sync job status to fetch connector sync jobs for, connector_id: str # A connector id to fetch connector sync jobs for, job_type: any # A comma-separated list of job types to fetch the sync jobs for}
@returns(200) {count: num, results: [map]}

@endpoint POST /_connector/_sync_job
@desc Create a connector sync job
@required {id: any # The id of the associated connector}
@optional {job_type: any, trigger_method: any}
@returns(200) {id: any}
@example_request "{\n  \"id\": \"connector-id\",\n  \"job_type\": \"full\",\n  \"trigger_method\": \"on_demand\"\n}"

@endpoint PUT /_connector/_sync_job/{connector_sync_job_id}/_stats
@desc Set the connector sync job stats
@required {connector_sync_job_id: str # The unique identifier of the connector sync job., deleted_document_count: num # The number of documents the sync job deleted., indexed_document_count: num # The number of documents the sync job indexed., indexed_document_volume: num # The total size of the data (in MiB) the sync job indexed.}
@optional {last_seen: any # The timestamp to use in the `last_seen` property for the connector sync job., metadata: any # The connector-specific metadata., total_document_count: num # The total number of documents in the target index after the sync job finished.}
@returns(200)
@example_request "{\n    \"deleted_document_count\": 10,\n    \"indexed_document_count\": 20,\n    \"indexed_document_volume\": 1000,\n    \"total_document_count\": 2000,\n    \"last_seen\": \"2023-01-02T10:00:00Z\"\n}"

@endpoint PUT /_connector/{connector_id}/_filtering/_activate
@desc Activate the connector draft filter
@required {connector_id: str # The unique identifier of the connector to be updated}
@returns(200) {result: any}

@endpoint PUT /_connector/{connector_id}/_api_key_id
@desc Update the connector API key ID
@required {connector_id: str # The unique identifier of the connector to be updated}
@optional {api_key_id: str, api_key_secret_id: str}
@returns(200) {result: any}
@example_request "{\n    \"api_key_id\": \"my-api-key-id\",\n    \"api_key_secret_id\": \"my-connector-secret-id\"\n}"

@endpoint PUT /_connector/{connector_id}/_configuration
@desc Update the connector configuration
@required {connector_id: str # The unique identifier of the connector to be updated}
@optional {configuration: any, values: map}
@returns(200) {result: any}
@example_request "{\n    \"values\": {\n        \"tenant_id\": \"my-tenant-id\",\n        \"tenant_name\": \"my-sharepoint-site\",\n        \"client_id\": \"foo\",\n        \"secret_value\": \"bar\",\n        \"site_collections\": \"*\"\n    }\n}"

@endpoint PUT /_connector/{connector_id}/_error
@desc Update the connector error field
@required {connector_id: str # The unique identifier of the connector to be updated, error: any}
@returns(200) {result: any}
@example_request "{\n    \"error\": \"Houston, we have a problem!\"\n}"

@endpoint PUT /_connector/{connector_id}/_features
@desc Update the connector features
@required {connector_id: str # The unique identifier of the connector to be updated., features: any}
@returns(200) {result: any}
@example_request "{\n  \"features\": {\n    \"document_level_security\": {\n      \"enabled\": true\n    },\n    \"incremental_sync\": {\n      \"enabled\": true\n    },\n    \"sync_rules\": {\n      \"advanced\": {\n        \"enabled\": false\n      },\n      \"basic\": {\n        \"enabled\": true\n      }\n    }\n  }\n}"

@endpoint PUT /_connector/{connector_id}/_filtering
@desc Update the connector filtering
@required {connector_id: str # The unique identifier of the connector to be updated}
@optional {filtering: [map{active!: any, domain: str, draft!: any}], rules: [map{created_at: any, field!: any, id!: any, order!: num, policy!: any, rule!: any, updated_at: any, value!: str}], advanced_snippet: any}
@returns(200) {result: any}
@example_request "{\n    \"rules\": [\n         {\n            \"field\": \"file_extension\",\n            \"id\": \"exclude-txt-files\",\n            \"order\": 0,\n            \"policy\": \"exclude\",\n            \"rule\": \"equals\",\n            \"value\": \"txt\"\n        },\n        {\n            \"field\": \"_\",\n            \"id\": \"DEFAULT\",\n            \"order\": 1,\n            \"policy\": \"include\",\n            \"rule\": \"regex\",\n            \"value\": \".*\"\n        }\n    ]\n}"

@endpoint PUT /_connector/{connector_id}/_filtering/_validation
@desc Update the connector draft filtering validation
@required {connector_id: str # The unique identifier of the connector to be updated, validation: any}
@returns(200) {result: any}

@endpoint PUT /_connector/{connector_id}/_index_name
@desc Update the connector index name
@required {connector_id: str # The unique identifier of the connector to be updated, index_name: any}
@returns(200) {result: any}
@example_request "{\n    \"index_name\": \"data-from-my-google-drive\"\n}"

@endpoint PUT /_connector/{connector_id}/_name
@desc Update the connector name and description
@required {connector_id: str # The unique identifier of the connector to be updated}
@optional {name: str, description: str}
@returns(200) {result: any}
@example_request "{\n    \"name\": \"Custom connector\",\n    \"description\": \"This is my customized connector\"\n}"

@endpoint PUT /_connector/{connector_id}/_native
@desc Update the connector is_native flag
@required {connector_id: str # The unique identifier of the connector to be updated, is_native: bool}
@returns(200) {result: any}

@endpoint PUT /_connector/{connector_id}/_pipeline
@desc Update the connector pipeline
@required {connector_id: str # The unique identifier of the connector to be updated, pipeline: any}
@returns(200) {result: any}
@example_request "{\n    \"pipeline\": {\n        \"extract_binary_content\": true,\n        \"name\": \"my-connector-pipeline\",\n        \"reduce_whitespace\": true,\n        \"run_ml_inference\": true\n    }\n}"

@endpoint PUT /_connector/{connector_id}/_scheduling
@desc Update the connector scheduling
@required {connector_id: str # The unique identifier of the connector to be updated, scheduling: any}
@returns(200) {result: any}
@example_request "{\n    \"scheduling\": {\n        \"access_control\": {\n            \"enabled\": true,\n            \"interval\": \"0 10 0 * * ?\"\n        },\n        \"full\": {\n            \"enabled\": true,\n            \"interval\": \"0 20 0 * * ?\"\n        },\n        \"incremental\": {\n            \"enabled\": false,\n            \"interval\": \"0 30 0 * * ?\"\n        }\n    }\n}"

@endpoint PUT /_connector/{connector_id}/_service_type
@desc Update the connector service type
@required {connector_id: str # The unique identifier of the connector to be updated, service_type: str}
@returns(200) {result: any}
@example_request "{\n    \"service_type\": \"sharepoint_online\"\n}"

@endpoint PUT /_connector/{connector_id}/_status
@desc Update the connector status
@required {connector_id: str # The unique identifier of the connector to be updated, status: any}
@returns(200) {result: any}
@example_request "{\n    \"status\": \"needs_configuration\"\n}"

@endgroup

@group _count
@endpoint GET /_count
@desc Count search results
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., ignore_throttled: bool # If `true`, concrete, expanded, or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
This parameter can be used only when the `q` query string parameter is specified., min_score: num # The minimum `_score` value that documents must have to be included in the result., preference: str # The node or shard the operation should be performed on. By default, it is random., routing: any # A custom value used to route operations to a specific shard., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers., q: str # The query in Lucene query string syntax. This parameter cannot be used with a request body., query: any # Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.}
@returns(200) {count: num, _shards: any}
@example_request "{\n  \"query\" : {\n    \"term\" : { \"user.id\" : \"kimchy\" }\n  }\n}"

@endpoint POST /_count
@desc Count search results
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., ignore_throttled: bool # If `true`, concrete, expanded, or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
This parameter can be used only when the `q` query string parameter is specified., min_score: num # The minimum `_score` value that documents must have to be included in the result., preference: str # The node or shard the operation should be performed on. By default, it is random., routing: any # A custom value used to route operations to a specific shard., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers., q: str # The query in Lucene query string syntax. This parameter cannot be used with a request body., query: any # Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.}
@returns(200) {count: num, _shards: any}
@example_request "{\n  \"query\" : {\n    \"term\" : { \"user.id\" : \"kimchy\" }\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_count
@desc Count search results
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., ignore_throttled: bool # If `true`, concrete, expanded, or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
This parameter can be used only when the `q` query string parameter is specified., min_score: num # The minimum `_score` value that documents must have to be included in the result., preference: str # The node or shard the operation should be performed on. By default, it is random., routing: any # A custom value used to route operations to a specific shard., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers., q: str # The query in Lucene query string syntax. This parameter cannot be used with a request body., query: any # Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.}
@returns(200) {count: num, _shards: any}
@example_request "{\n  \"query\" : {\n    \"term\" : { \"user.id\" : \"kimchy\" }\n  }\n}"

@endpoint POST /{index}/_count
@desc Count search results
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., ignore_throttled: bool # If `true`, concrete, expanded, or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
This parameter can be used only when the `q` query string parameter is specified., min_score: num # The minimum `_score` value that documents must have to be included in the result., preference: str # The node or shard the operation should be performed on. By default, it is random., routing: any # A custom value used to route operations to a specific shard., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers., q: str # The query in Lucene query string syntax. This parameter cannot be used with a request body., query: any # Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.}
@returns(200) {count: num, _shards: any}
@example_request "{\n  \"query\" : {\n    \"term\" : { \"user.id\" : \"kimchy\" }\n  }\n}"

@endpoint PUT /{index}/_create/{id}
@desc Create a new document in the index
@required {index: str # The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index., id: str # A unique identifier for the document. To automatically generate a document ID, use the `POST /{index}/_doc/` request format.}
@optional {include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., pipeline: str # The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes., require_alias: bool # If `true`, the destination must be an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created)., routing: any # A custom value that is used to route operations to a specific shard., timeout: any # The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.  This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur., version: num # The explicit version number for concurrency control. 
It must be a non-negative long number., version_type: str # The version type., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.}
@returns(200) {_id: any, _index: any, _primary_term: num, result: any, _seq_no: any, _shards: any, _version: any, failure_store: any, forced_refresh: bool}
@example_request "{\n  \"@timestamp\": \"2099-11-15T13:12:00\",\n  \"message\": \"GET /search HTTP/1.1 200 1070000\",\n  \"user\": {\n    \"id\": \"kimchy\"\n  }\n}"

@endpoint POST /{index}/_create/{id}
@desc Create a new document in the index
@required {index: str # The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index., id: str # A unique identifier for the document. To automatically generate a document ID, use the `POST /{index}/_doc/` request format.}
@optional {include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., pipeline: str # The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes., require_alias: bool # If `true`, the destination must be an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created)., routing: any # A custom value that is used to route operations to a specific shard., timeout: any # The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.  This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur., version: num # The explicit version number for concurrency control. 
It must be a non-negative long number., version_type: str # The version type., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.}
@returns(200) {_id: any, _index: any, _primary_term: num, result: any, _seq_no: any, _shards: any, _version: any, failure_store: any, forced_refresh: bool}
@example_request "{\n  \"@timestamp\": \"2099-11-15T13:12:00\",\n  \"message\": \"GET /search HTTP/1.1 200 1070000\",\n  \"user\": {\n    \"id\": \"kimchy\"\n  }\n}"

@endgroup

@group _dangling
@endpoint POST /_dangling/{index_uuid}
@desc Import a dangling index
@required {index_uuid: str # The UUID of the index to import. Use the get dangling indices API to locate the UUID.}
@optional {accept_data_loss: bool # This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster., master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_dangling/{index_uuid}
@desc Delete a dangling index
@required {index_uuid: str # The UUID of the index to delete. Use the get dangling indices API to find the UUID.}
@optional {accept_data_loss: bool # This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index., master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response.}
@returns(200) {acknowledged: bool}

@endpoint GET /_dangling
@desc Get the dangling indices
@returns(200) {dangling_indices: [map]}

@endgroup

@group {index}
@endpoint GET /{index}/_doc/{id}
@desc Get a document by its ID
@required {index: str # The name of the index that contains the document., id: str # A unique document identifier.}
@optional {preference: str # The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.  If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing)., routing: any # A custom value used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_exclude_vectors: bool # Whether vectors should be excluded from _source, _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. 
Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails., version: num # The version number for concurrency control. It must match the current version of the document for the request to succeed., version_type: str # The version type.}
@returns(200) {_index: any, fields: map, _ignored: [str], found: bool, _id: any, _primary_term: num, _routing: str, _seq_no: any, _source: map, _version: any}

@endpoint PUT /{index}/_doc/{id}
@desc Create or update a document in an index
@required {index: str # The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API., id: str # A unique identifier for the document. To automatically generate a document ID, use the `POST /{index}/_doc/` request format and omit this parameter.}
@optional {if_primary_term: num # Only perform the operation if the document has this primary term., if_seq_no: num # Only perform the operation if the document has this sequence number., include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., op_type: str # Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required., pipeline: str # The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes., routing: any # A custom value that is used to route operations to a specific shard., timeout: any # The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.  This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. 
The actual wait time could be longer, particularly when multiple waits occur., version: num # An explicit version number for concurrency control. It must be a non-negative long number., version_type: str # The version type., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active., require_alias: bool # If `true`, the destination must be an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {_id: any, _index: any, _primary_term: num, result: any, _seq_no: any, _shards: any, _version: any, failure_store: any, forced_refresh: bool}
@example_request "{\n  \"@timestamp\": \"2099-11-15T13:12:00\",\n  \"message\": \"GET /search HTTP/1.1 200 1070000\",\n  \"user\": {\n    \"id\": \"kimchy\"\n  }\n}"

@endpoint POST /{index}/_doc/{id}
@desc Create or update a document in an index
@required {index: str # The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API., id: str # A unique identifier for the document. To automatically generate a document ID, use the `POST /{index}/_doc/` request format and omit this parameter.}
@optional {if_primary_term: num # Only perform the operation if the document has this primary term., if_seq_no: num # Only perform the operation if the document has this sequence number., include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., op_type: str # Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required., pipeline: str # The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes., routing: any # A custom value that is used to route operations to a specific shard., timeout: any # The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.  This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. 
The actual wait time could be longer, particularly when multiple waits occur., version: num # An explicit version number for concurrency control. It must be a non-negative long number., version_type: str # The version type., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active., require_alias: bool # If `true`, the destination must be an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {_id: any, _index: any, _primary_term: num, result: any, _seq_no: any, _shards: any, _version: any, failure_store: any, forced_refresh: bool}
@example_request "{\n  \"@timestamp\": \"2099-11-15T13:12:00\",\n  \"message\": \"GET /search HTTP/1.1 200 1070000\",\n  \"user\": {\n    \"id\": \"kimchy\"\n  }\n}"

@endpoint DELETE /{index}/_doc/{id}
@desc Delete a document
@required {index: str # The name of the target index., id: str # A unique identifier for the document.}
@optional {if_primary_term: num # Only perform the operation if the document has this primary term., if_seq_no: num # Only perform the operation if the document has this sequence number., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes., routing: any # A custom value used to route operations to a specific shard., timeout: any # The period to wait for active shards.  This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error., version: num # An explicit version number for concurrency control. It must match the current version of the document for the request to succeed., version_type: str # The version type., wait_for_active_shards: any # The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.}
@returns(200) {_id: any, _index: any, _primary_term: num, result: any, _seq_no: any, _shards: any, _version: any, failure_store: any, forced_refresh: bool}

@endpoint HEAD /{index}/_doc/{id}
@desc Check a document
@required {index: str # A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`)., id: str # A unique document identifier.}
@optional {preference: str # The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.  If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing)., routing: any # A custom value used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`., version: num # Explicit version number for concurrency control. 
The specified version must match the current version of the document for the request to succeed., version_type: str # The version type.}
@returns(200)

@endpoint POST /{index}/_delete_by_query
@desc Delete documents
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str # Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., conflicts: str # What to do if delete by query hits version conflicts: `abort` or `proceed`., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., from: num # Skips the specified number of documents., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
This parameter can be used only when the `q` query string parameter is specified., max_docs: num # The maximum number of documents to process. Defaults to all documents. When set to a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation., preference: str # The node or shard the operation should be performed on. It is random by default., refresh: bool # If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`., request_cache: bool # If `true`, the request cache is used for this request. Defaults to the index-level setting., requests_per_second: num # The throttle for this request in sub-requests per second., routing: any # A custom value used to route operations to a specific shard., q: str # A query in the Lucene query string syntax., scroll: any # The period to retain the search context for scrolling., scroll_size: num # The size of the scroll request that powers the operation., search_timeout: any # The explicit timeout for each search request. It defaults to no timeout., search_type: str # The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`., slices: any # The number of slices this task should be divided into., sort: [str] # A comma-separated list of `<field>:<direction>` pairs., stats: [str] # The specific `tag` of the request for logging and statistical purposes., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. 
Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers., timeout: any # The period each deletion request waits for active shards., version: bool # If `true`, returns the document version as part of a hit., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available., wait_for_completion: bool # If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space., max_docs: num # The maximum number of documents to delete., query: any # The documents to delete specified with Query DSL., slice: any # Slice the request manually using the provided slice ID and total number of slices., sort: any # A sort object that specifies the order of deleted documents.}
@returns(200) {batches: num, deleted: num, failures: [map], noops: num, requests_per_second: num, retries: any, slice_id: num, slices: [map], task: any, throttled: any, throttled_millis: any, throttled_until: any, throttled_until_millis: any, timed_out: bool, took: any, total: num, version_conflicts: num}
@example_request "{\n  \"query\": {\n    \"match_all\": {}\n  }\n}"

@endgroup

@group _delete_by_query
@endpoint POST /_delete_by_query/{task_id}/_rethrottle
@desc Throttle a delete by query operation
@required {task_id: str # The ID for the task., requests_per_second: num # The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`.}
@returns(200) {node_failures: [map], task_failures: [map], nodes: map, tasks: any}

@endgroup

@group _scripts
@endpoint GET /_scripts/{id}
@desc Get a script or search template
@required {id: str # The identifier for the stored script or search template.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {_id: any, found: bool, script: any}

@endpoint PUT /_scripts/{id}
@desc Create or update a script or search template
@required {id: str # The identifier for the stored script or search template. It must be unique within the cluster., script: any # The script or search template, its parameters, and its language.}
@optional {context: str # The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. If you specify both this and the `{context}` path parameter, the API uses the request path parameter., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"script\": {\n    \"lang\": \"mustache\",\n    \"source\": {\n      \"query\": {\n        \"match\": {\n          \"message\": \"{{query_string}}\"\n        }\n      },\n      \"from\": \"{{from}}\",\n      \"size\": \"{{size}}\"\n    }\n  }\n}"

@endpoint POST /_scripts/{id}
@desc Create or update a script or search template
@required {id: str # The identifier for the stored script or search template. It must be unique within the cluster., script: any # The script or search template, its parameters, and its language.}
@optional {context: str # The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. If you specify both this and the `{context}` path parameter, the API uses the request path parameter., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"script\": {\n    \"lang\": \"mustache\",\n    \"source\": {\n      \"query\": {\n        \"match\": {\n          \"message\": \"{{query_string}}\"\n        }\n      },\n      \"from\": \"{{from}}\",\n      \"size\": \"{{size}}\"\n    }\n  }\n}"

@endpoint DELETE /_scripts/{id}
@desc Delete a script or search template
@required {id: str # The identifier for the stored script or search template.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}

@endgroup

@group _enrich
@endpoint GET /_enrich/policy/{name}
@desc Get an enrich policy
@required {name: any # Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter.}
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {policies: [map]}

@endpoint PUT /_enrich/policy/{name}
@desc Create an enrich policy
@required {name: str # Name of the enrich policy to create or update.}
@optional {master_timeout: any # Period to wait for a connection to the master node., geo_match: any # Matches enrich data to incoming documents based on a `geo_shape` query., match: any # Matches enrich data to incoming documents based on a `term` query., range: any # Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"geo_match\": {\n    \"indices\": \"postal_codes\",\n    \"match_field\": \"location\",\n    \"enrich_fields\": [ \"location\", \"postal_code\" ]\n  }\n}"

@endpoint DELETE /_enrich/policy/{name}
@desc Delete an enrich policy
@required {name: str # Enrich policy to delete.}
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {acknowledged: bool}

@endpoint PUT /_enrich/policy/{name}/_execute
@desc Run an enrich policy
@required {name: str # Enrich policy to execute.}
@optional {master_timeout: any # Period to wait for a connection to the master node., wait_for_completion: bool # If `true`, the request blocks other enrich policy execution requests until complete.}
@returns(200) {status: any, task: any}

@endpoint GET /_enrich/policy
@desc Get an enrich policy
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {policies: [map]}

@endpoint GET /_enrich/_stats
@desc Get enrich stats
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {coordinator_stats: [map], executing_policies: [map], cache_stats: [map]}

@endgroup

@group _eql
@endpoint GET /_eql/search/{id}
@desc Get async EQL search results
@required {id: str # Identifier for the search.}
@optional {keep_alive: any # Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request., wait_for_completion_timeout: any # Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results.}
@returns(200) {id: any, is_partial: bool, is_running: bool, took: any, timed_out: bool, hits: any, shard_failures: [map]}

@endpoint DELETE /_eql/search/{id}
@desc Delete an async EQL search
@required {id: str # Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`.}
@returns(200) {acknowledged: bool}

@endpoint GET /_eql/search/status/{id}
@desc Get the async EQL status
@required {id: str # Identifier for the search.}
@returns(200) {id: any, is_partial: bool, is_running: bool, start_time_in_millis: any, expiration_time_in_millis: any, completion_status: num}

@endgroup

@group {index}
@endpoint GET /{index}/_eql/search
@desc Get EQL search results
@required {index: any # Comma-separated list of index names to scope the operation, query: str # EQL query you wish to run.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # If true, returns partial results if there are shard failures. If false, returns an error with no partial results., allow_partial_sequence_results: bool # If true, sequence queries will return partial results in case of shard failures. If false, they will return no results at all. This flag has effect only if allow_partial_search_results is true., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ccs_minimize_roundtrips: bool # Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., keep_alive: any # Period for which the search and its results are stored on the cluster., keep_on_completion: bool # If true, the search and its results are stored on the cluster., wait_for_completion_timeout: any # Timeout duration to wait for the request to finish. 
Defaults to no timeout, meaning the request waits for complete search results., case_sensitive: bool, event_category_field: any=event.category # Field containing the event classification, such as process, file, or network., tiebreaker_field: any # Field used to sort hits with the same timestamp in ascending order, timestamp_field: any=@timestamp # Field containing event timestamp., fetch_size: any=1000 # Maximum number of events to search at a time for sequence queries., filter: any # Query, written in Query DSL, used to filter the events on which the EQL query runs., keep_alive: any, keep_on_completion: bool, wait_for_completion_timeout: any, allow_partial_search_results: bool=true # Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined using allow_partial_sequence_results, allow_partial_sequence_results: bool=false # This flag applies only to sequences and has effect only if allow_partial_search_results=true. If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results., size: any # For basic queries, the maximum number of matching events to return. Defaults to 10, fields: any # Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit., result_position: any=tail, runtime_mappings: any, max_samples_per_key: num=1 # By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries.}
@returns(200) {id: any, is_partial: bool, is_running: bool, took: any, timed_out: bool, hits: any, shard_failures: [map]}
@example_request "{\n  \"query\": \"\"\"\n    process where (process.name == \"cmd.exe\" and process.pid != 2013)\n  \"\"\"\n}"

@endpoint POST /{index}/_eql/search
@desc Get EQL search results
@required {index: any # Comma-separated list of index names to scope the operation, query: str # EQL query you wish to run.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # If true, returns partial results if there are shard failures. If false, returns an error with no partial results., allow_partial_sequence_results: bool # If true, sequence queries will return partial results in case of shard failures. If false, they will return no results at all. This flag has effect only if allow_partial_search_results is true., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ccs_minimize_roundtrips: bool # Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., keep_alive: any # Period for which the search and its results are stored on the cluster., keep_on_completion: bool # If true, the search and its results are stored on the cluster., wait_for_completion_timeout: any # Timeout duration to wait for the request to finish. 
Defaults to no timeout, meaning the request waits for complete search results., case_sensitive: bool, event_category_field: any=event.category # Field containing the event classification, such as process, file, or network., tiebreaker_field: any # Field used to sort hits with the same timestamp in ascending order, timestamp_field: any=@timestamp # Field containing event timestamp., fetch_size: any=1000 # Maximum number of events to search at a time for sequence queries., filter: any # Query, written in Query DSL, used to filter the events on which the EQL query runs., keep_alive: any, keep_on_completion: bool, wait_for_completion_timeout: any, allow_partial_search_results: bool=true # Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined using allow_partial_sequence_results, allow_partial_sequence_results: bool=false # This flag applies only to sequences and has effect only if allow_partial_search_results=true. If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results., size: any # For basic queries, the maximum number of matching events to return. Defaults to 10, fields: any # Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit., result_position: any=tail, runtime_mappings: any, max_samples_per_key: num=1 # By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries.}
@returns(200) {id: any, is_partial: bool, is_running: bool, took: any, timed_out: bool, hits: any, shard_failures: [map]}
@example_request "{\n  \"query\": \"\"\"\n    process where (process.name == \"cmd.exe\" and process.pid != 2013)\n  \"\"\"\n}"

@endgroup

@group _query
@endpoint POST /_query/async
@desc Run an async ES|QL query
@required {query: str # The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.}
@optional {allow_partial_results: bool # If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures.  To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`., delimiter: str # The character to use between values within a CSV row. It is valid only for the CSV format., drop_null_columns: bool # Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns., format: str # A short version of the Accept header, e.g. json, yaml.  `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response.  For async requests, nothing will be returned if the async query doesn't finish within the timeout. The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively., columnar: bool # By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results., filter: any # Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on., time_zone: str # Sets the default timezone of the query., locale: str # Returns results (especially dates) formatted per the conventions of the locale., params: any # To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) 
in the query string for each of the parameters., profile: bool # If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query., tables: map # Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name., include_ccs_metadata: bool=false # When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count., include_execution_metadata: bool=false # When set to `true`, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. This is similar to `include_ccs_metadata`, but it also returns metadata when the query is not CCS/CPS., wait_for_completion_timeout: any=1s # The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned. Otherwise, a query ID is returned that can later be used to retrieve the results., keep_alive: any=5d # The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value., keep_on_completion: bool=false # Indicates whether the query and its results are stored in the cluster. 
If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.}
@returns(200)
@example_request "{\n  \"query\": \"\"\"\n    FROM library,remote-*:library\n    | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n    | STATS MAX(page_count) BY year\n    | SORT year\n    | LIMIT 5\n  \"\"\",\n  \"wait_for_completion_timeout\": \"2s\",\n  \"include_ccs_metadata\": true\n}"

@endpoint GET /_query/async/{id}
@desc Get async ES|QL query results
@required {id: str # The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.}
@optional {drop_null_columns: bool # Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns., format: str # A short version of the Accept header, for example `json` or `yaml`., keep_alive: any # The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing., wait_for_completion_timeout: any # The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results.}
@returns(200)

@endpoint DELETE /_query/async/{id}
@desc Delete an async ES|QL query
@required {id: str # The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.}
@returns(200) {acknowledged: bool}

@endpoint POST /_query/async/{id}/stop
@desc Stop async ES|QL query
@required {id: str # The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.}
@optional {drop_null_columns: bool # Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.}
@returns(200) {took: any, is_partial: bool, all_columns: [map], columns: [map], values: [[any]], _clusters: any, profile: map}

@endpoint GET /_query/queries/{id}
@desc Get information about a specific running ES|QL query
@required {id: str # The query ID}
@returns(200) {id: num, node: any, start_time_millis: num, running_time_nanos: num, query: str, coordinating_node: any, data_nodes: [str]}

@endpoint GET /_query/queries
@desc Get information about running ES|QL queries
@returns(200) {queries: map}

@endpoint POST /_query
@desc Run an ES|QL query
@required {query: str # The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.}
@optional {format: str # A short version of the Accept header, e.g. json, yaml.  `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response., delimiter: str # The character to use between values within a CSV row. Only valid for the CSV format., drop_null_columns: bool # Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns., allow_partial_results: bool # If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures.  To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`., columnar: bool # By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results., filter: any # Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on., time_zone: str # Sets the default timezone of the query., locale: str # Returns results (especially dates) formatted per the conventions of the locale., params: any # To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters., profile: bool # If provided and `true` the response will include an extra `profile` object with information on how the query was executed. 
This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query., tables: map # Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name., include_ccs_metadata: bool=false # When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count., include_execution_metadata: bool=false # When set to `true`, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. This is similar to `include_ccs_metadata`, but it also returns metadata when the query is not CCS/CPS}
@returns(200) {took: any, is_partial: bool, all_columns: [map], columns: [map], values: [[any]], _clusters: any, profile: map}
@example_request "{\n  \"query\": \"\"\"\n    FROM library,remote-*:library\n    | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n    | STATS MAX(page_count) BY year\n    | SORT year\n    | LIMIT 5\n  \"\"\",\n  \"include_ccs_metadata\": true\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_source/{id}
@desc Get a document's source
@required {index: str # The name of the index that contains the document., id: str # A unique document identifier.}
@optional {preference: str # The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing)., routing: any # A custom value used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return., _source_excludes: any # A comma-separated list of source fields to exclude in the response., _source_includes: any # A comma-separated list of source fields to include in the response., version: num # The version number for concurrency control. It must match the current version of the document for the request to succeed., version_type: str # The version type.}
@returns(200)

@endpoint HEAD /{index}/_source/{id}
@desc Check for a document source
@required {index: str # A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`)., id: str # A unique identifier for the document.}
@optional {preference: str # The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing)., routing: any # A custom value used to route operations to a specific shard., _source: any # Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return., _source_excludes: any # A comma-separated list of source fields to exclude in the response., _source_includes: any # A comma-separated list of source fields to include in the response., version: num # The version number for concurrency control. It must match the current version of the document for the request to succeed., version_type: str # The version type.}
@returns(200)

@endpoint GET /{index}/_explain/{id}
@desc Explain a document match result
@required {index: str # Index names that are used to limit the request. Only a single index name can be provided to this parameter., id: str # The document identifier.}
@optional {analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified., preference: str # The node or shard the operation should be performed on. It is random by default., routing: any # A custom value used to route operations to a specific shard., _source: any # `True` or `false` to return the `_source` field or not or a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # A comma-separated list of stored fields to return in the response., q: str # The query in the Lucene query string syntax., query: any # Defines the search definition using the Query DSL.}
@returns(200) {_index: any, _id: any, matched: bool, explanation: any, get: any}
@example_request "{\n  \"query\" : {\n    \"match\" : { \"message\" : \"elasticsearch\" }\n  }\n}"

@endpoint POST /{index}/_explain/{id}
@desc Explain a document match result
@required {index: str # Index names that are used to limit the request. Only a single index name can be provided to this parameter., id: str # The document identifier.}
@optional {analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified., preference: str # The node or shard the operation should be performed on. It is random by default., routing: any # A custom value used to route operations to a specific shard., _source: any # `True` or `false` to return the `_source` field or not or a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # A comma-separated list of stored fields to return in the response., q: str # The query in the Lucene query string syntax., query: any # Defines the search definition using the Query DSL.}
@returns(200) {_index: any, _id: any, matched: bool, explanation: any, get: any}
@example_request "{\n  \"query\" : {\n    \"match\" : { \"message\" : \"elasticsearch\" }\n  }\n}"

@endgroup

@group _features
@endpoint GET /_features
@desc Get the features
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {features: [map]}

@endpoint POST /_features/_reset
@desc Reset the features
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {features: [map]}

@endgroup

@group _field_caps
@endpoint GET /_field_caps
@desc Get the field capabilities
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fields: any # A comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_unmapped: bool # If true, unmapped fields are included in the response., filters: any # A comma-separated list of filters to apply to the response., types: [str] # A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned., include_empty_fields: bool # If false, empty fields are not included in the response., index_filter: any # Filter indices if the provided query rewrites to `match_none` on every shard.  IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. 
For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document., runtime_mappings: any # Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.}
@returns(200) {indices: any, fields: map}
@example_request "{\n  \"index_filter\": {\n    \"range\": {\n      \"@timestamp\": {\n        \"gte\": \"2018\"\n      }\n    }\n  }\n}"

@endpoint POST /_field_caps
@desc Get the field capabilities
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fields: any # A comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_unmapped: bool # If true, unmapped fields are included in the response., filters: any # A comma-separated list of filters to apply to the response., types: [str] # A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned., include_empty_fields: bool # If false, empty fields are not included in the response., index_filter: any # Filter indices if the provided query rewrites to `match_none` on every shard.  IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. 
For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document., runtime_mappings: any # Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.}
@returns(200) {indices: any, fields: map}
@example_request "{\n  \"index_filter\": {\n    \"range\": {\n      \"@timestamp\": {\n        \"gte\": \"2018\"\n      }\n    }\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_field_caps
@desc Get the field capabilities
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fields: any # A comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_unmapped: bool # If true, unmapped fields are included in the response., filters: any # A comma-separated list of filters to apply to the response., types: [str] # A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned., include_empty_fields: bool # If false, empty fields are not included in the response., index_filter: any # Filter indices if the provided query rewrites to `match_none` on every shard.  IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. 
For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document., runtime_mappings: any # Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.}
@returns(200) {indices: any, fields: map}
@example_request "{\n  \"index_filter\": {\n    \"range\": {\n      \"@timestamp\": {\n        \"gte\": \"2018\"\n      }\n    }\n  }\n}"

@endpoint POST /{index}/_field_caps
@desc Get the field capabilities
@required {index: any # A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fields: any # A comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_unmapped: bool # If true, unmapped fields are included in the response., filters: any # A comma-separated list of filters to apply to the response., types: [str] # A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned., include_empty_fields: bool # If false, empty fields are not included in the response., index_filter: any # Filter indices if the provided query rewrites to `match_none` on every shard.  IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. 
For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document., runtime_mappings: any # Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.}
@returns(200) {indices: any, fields: map}
@example_request "{\n  \"index_filter\": {\n    \"range\": {\n      \"@timestamp\": {\n        \"gte\": \"2018\"\n      }\n    }\n  }\n}"

@endpoint GET /{index}/_fleet/global_checkpoints
@desc Get global checkpoints
@required {index: any # A single index or index alias that resolves to a single index.}
@optional {wait_for_advance: bool # A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`., wait_for_index: bool # A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true., checkpoints: [num] # A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints., timeout: any # Period to wait for a global checkpoints to advance past `checkpoints`.}
@returns(200) {global_checkpoints: [num], timed_out: bool}

@endgroup

@group _fleet
@endpoint GET /_fleet/_fleet_msearch
@desc Run multiple Fleet searches
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response., wait_for_checkpoints: [num] # A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search., allow_partial_search_results: bool # If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.}
@returns(200) {docs: [any]}

@endpoint POST /_fleet/_fleet_msearch
@desc Run multiple Fleet searches
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response., wait_for_checkpoints: [num] # A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search., allow_partial_search_results: bool # If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.}
@returns(200) {docs: [any]}

@endgroup

@group {index}
@endpoint GET /{index}/_fleet/_fleet_msearch
@desc Run multiple Fleet searches
@required {index: any # A single target to search. If the target is an index alias, it must resolve to a single index.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response., wait_for_checkpoints: [num] # A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search., allow_partial_search_results: bool # If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.}
@returns(200) {docs: [any]}

@endpoint POST /{index}/_fleet/_fleet_msearch
@desc Run multiple Fleet searches
@required {index: any # A single target to search. If the target is an index alias, it must resolve to a single index.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response., wait_for_checkpoints: [num] # A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search., allow_partial_search_results: bool # If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.}
@returns(200) {docs: [any]}

@endpoint GET /{index}/_fleet/_fleet_search
@desc Run a Fleet search
@required {index: any # A single target to search. If the target is an index alias, it must resolve to a single index.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str, analyze_wildcard: bool, batched_reduce_size: num, ccs_minimize_roundtrips: bool, default_operator: str, df: str, docvalue_fields: any, expand_wildcards: any, explain: bool, ignore_throttled: bool, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool, max_concurrent_shard_requests: num, preference: str, pre_filter_shard_size: num, request_cache: bool, routing: any, scroll: any, search_type: str, stats: [str], stored_fields: any, suggest_field: str # Specifies which field to use for suggestions., suggest_mode: str, suggest_size: num, suggest_text: str # The source text for which the suggestions should be returned., terminate_after: num, timeout: any, track_total_hits: any, track_scores: bool, typed_keys: bool, rest_total_hits_as_int: bool, version: bool, _source: any, _source_excludes: any, _source_includes: any, seq_no_primary_term: bool, q: str, size: num, from: num, sort: any, wait_for_checkpoints: [num] # A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search., allow_partial_search_results: bool # If true, returns partial results if there are shard request timeouts or shard failures. 
If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default., aggregations: map, collapse: any, explain: bool=false # If true, returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., highlight: any, track_total_hits: any # Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits., indices_boost: [map] # Boosts the _score of documents from specified indices., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response., min_score: num # Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations., post_filter: any, profile: bool, query: any # Defines the search definition using the Query DSL., rescore: any, script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any, size: num=10 # The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., slice: any, sort: any, _source: any # Indicates which source fields are returned for matching documents. 
These fields are returned in the hits._source property of the search response., fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response., suggest: any, terminate_after: num=0 # Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early., timeout: str # Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout., track_scores: bool=false # If true, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If true, returns document version as part of a hit., seq_no_primary_term: bool # If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control., stored_fields: any # List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response., pit: any # Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an  in the request path., runtime_mappings: any # Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}

@endpoint POST /{index}/_fleet/_fleet_search
@desc Run a Fleet search
@required {index: any # A single target to search. If the target is an index alias, it must resolve to a single index.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str, analyze_wildcard: bool, batched_reduce_size: num, ccs_minimize_roundtrips: bool, default_operator: str, df: str, docvalue_fields: any, expand_wildcards: any, explain: bool, ignore_throttled: bool, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool, max_concurrent_shard_requests: num, preference: str, pre_filter_shard_size: num, request_cache: bool, routing: any, scroll: any, search_type: str, stats: [str], stored_fields: any, suggest_field: str # Specifies which field to use for suggestions., suggest_mode: str, suggest_size: num, suggest_text: str # The source text for which the suggestions should be returned., terminate_after: num, timeout: any, track_total_hits: any, track_scores: bool, typed_keys: bool, rest_total_hits_as_int: bool, version: bool, _source: any, _source_excludes: any, _source_includes: any, seq_no_primary_term: bool, q: str, size: num, from: num, sort: any, wait_for_checkpoints: [num] # A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search., allow_partial_search_results: bool # If true, returns partial results if there are shard request timeouts or shard failures. 
If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default., aggregations: map, collapse: any, explain: bool=false # If true, returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., highlight: any, track_total_hits: any # Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits., indices_boost: [map] # Boosts the _score of documents from specified indices., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response., min_score: num # Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations., post_filter: any, profile: bool, query: any # Defines the search definition using the Query DSL., rescore: any, script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any, size: num=10 # The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter., slice: any, sort: any, _source: any # Indicates which source fields are returned for matching documents. 
These fields are returned in the hits._source property of the search response., fields: [map{field!: any, format: str, include_unmapped: bool}] # Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response., suggest: any, terminate_after: num=0 # Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early., timeout: str # Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout., track_scores: bool=false # If true, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If true, returns document version as part of a hit., seq_no_primary_term: bool # If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control., stored_fields: any # List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response., pit: any # Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an  in the request path., runtime_mappings: any # Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}

@endgroup

@group _script_context
@endpoint GET /_script_context
@desc Get script contexts
@returns(200) {contexts: [map]}

@endgroup

@group _script_language
@endpoint GET /_script_language
@desc Get script languages
@returns(200) {language_contexts: [map], types_allowed: [str]}

@endgroup

@group {index}
@endpoint GET /{index}/_graph/explore
@desc Explore graph analytics
@required {index: any # Name of the index.}
@optional {routing: any # Custom value used to route operations to a specific shard., timeout: any # Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout., connections: any # Specifies one or more fields from which you want to extract terms that are associated with the specified vertices., controls: any # Direct the Graph API how to build the graph., query: any # A seed query that identifies the documents of interest. Can be any valid Elasticsearch query., vertices: [map{exclude: [str], field!: any, include: [map], min_doc_count: num, shard_min_doc_count: num, size: num}] # Specifies one or more fields that contain the terms you want to include in the graph as vertices.}
@returns(200) {connections: [map], failures: [map], timed_out: bool, took: num, vertices: [map]}
@example_request "{\n  \"query\": {\n    \"match\": {\n      \"query.raw\": \"midi\"\n    }\n  },\n  \"vertices\": [\n    {\n      \"field\": \"product\"\n    }\n  ],\n  \"connections\": {\n    \"vertices\": [\n      {\n        \"field\": \"query.raw\"\n      }\n    ]\n  }\n}"

@endpoint POST /{index}/_graph/explore
@desc Explore graph analytics
@required {index: any # Name of the index.}
@optional {routing: any # Custom value used to route operations to a specific shard., timeout: any # Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout., connections: any # Specifies one or more fields from which you want to extract terms that are associated with the specified vertices., controls: any # Direct the Graph API how to build the graph., query: any # A seed query that identifies the documents of interest. Can be any valid Elasticsearch query., vertices: [map{exclude: [str], field!: any, include: [map], min_doc_count: num, shard_min_doc_count: num, size: num}] # Specifies one or more fields that contain the terms you want to include in the graph as vertices.}
@returns(200) {connections: [map], failures: [map], timed_out: bool, took: num, vertices: [map]}
@example_request "{\n  \"query\": {\n    \"match\": {\n      \"query.raw\": \"midi\"\n    }\n  },\n  \"vertices\": [\n    {\n      \"field\": \"product\"\n    }\n  ],\n  \"connections\": {\n    \"vertices\": [\n      {\n        \"field\": \"query.raw\"\n      }\n    ]\n  }\n}"

@endgroup

@group _health_report
@endpoint GET /_health_report
@desc Get the cluster health
@optional {timeout: any # Explicit operation timeout., verbose: bool # Opt-in for more information about the health of the system., size: num # Limit the number of affected resources the health report API returns.}
@returns(200) {cluster_name: str, indicators: any, status: any}

@endpoint GET /_health_report/{feature}
@desc Get the cluster health
@required {feature: any # A feature of the cluster, as returned by the top-level health report API.}
@optional {timeout: any # Explicit operation timeout., verbose: bool # Opt-in for more information about the health of the system., size: num # Limit the number of affected resources the health report API returns.}
@returns(200) {cluster_name: str, indicators: any, status: any}

@endgroup

@group _ilm
@endpoint GET /_ilm/policy/{policy}
@desc Get lifecycle policies
@required {policy: str # Identifier for the policy.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /_ilm/policy/{policy}
@desc Create or update a lifecycle policy
@required {policy: str # Identifier for the policy.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., policy: any}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"policy\": {\n    \"_meta\": {\n      \"description\": \"used for nginx log\",\n      \"project\": {\n        \"name\": \"myProject\",\n        \"department\": \"myDepartment\"\n      }\n    },\n    \"phases\": {\n      \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n          \"forcemerge\": {\n            \"max_num_segments\": 1\n          }\n        }\n      },\n      \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n          \"delete\": {}\n        }\n      }\n    }\n  }\n}"

@endpoint DELETE /_ilm/policy/{policy}
@desc Delete a lifecycle policy
@required {policy: str # Identifier for the policy.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endgroup

@group {index}
@endpoint GET /{index}/_ilm/explain
@desc Explain the lifecycle state
@required {index: str # Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`.}
@optional {only_errors: bool # Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist., only_managed: bool # Filters the returned indices to only indices that are managed by ILM., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {indices: map}

@endgroup

@group _ilm
@endpoint GET /_ilm/policy
@desc Get lifecycle policies
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_ilm/status
@desc Get the ILM status
@returns(200) {operation_mode: any}

@endpoint POST /_ilm/migrate_to_data_tiers
@desc Migrate to data tiers routing
@optional {dry_run: bool # If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., legacy_template_to_delete: str, node_attribute: str}
@returns(200) {dry_run: bool, removed_legacy_template: str, migrated_ilm_policies: [str], migrated_indices: any, migrated_legacy_templates: [str], migrated_composable_templates: [str], migrated_component_templates: [str]}
@example_request "{\n  \"legacy_template_to_delete\": \"global-template\",\n  \"node_attribute\": \"custom_attribute_name\"\n}"

@endpoint POST /_ilm/move/{index}
@desc Move to a lifecycle step
@required {index: str # The name of the index whose lifecycle step is to change, current_step: any # The step that the index is expected to be in., next_step: any # The step that you want to run.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"current_step\": {\n    \"phase\": \"new\",\n    \"action\": \"complete\",\n    \"name\": \"complete\"\n  },\n  \"next_step\": {\n    \"phase\": \"warm\",\n    \"action\": \"forcemerge\",\n    \"name\": \"forcemerge\"\n  }\n}"

@endgroup

@group {index}
@endpoint POST /{index}/_ilm/remove
@desc Remove policies from an index
@required {index: str # The name of the index to remove the policy from}
@returns(200) {failed_indexes: [str], has_failures: bool}

@endpoint POST /{index}/_ilm/retry
@desc Retry a policy
@required {index: str # The name of the indices (comma-separated) whose failed lifecycle step is to be retried}
@returns(200) {acknowledged: bool}

@endgroup

@group _ilm
@endpoint POST /_ilm/start
@desc Start the ILM plugin
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint POST /_ilm/stop
@desc Stop the ILM plugin
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endgroup

@group {index}
@endpoint POST /{index}/_doc
@desc Create or update a document in an index
@required {index: str # The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API.}
@optional {if_primary_term: num # Only perform the operation if the document has this primary term., if_seq_no: num # Only perform the operation if the document has this sequence number., include_source_on_error: bool # Whether to include the document source in the error message in case of parsing errors., op_type: str # Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required., pipeline: str # The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter., refresh: str # If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes., routing: any # A custom value that is used to route operations to a specific shard., timeout: any # The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.  This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. 
The actual wait time could be longer, particularly when multiple waits occur., version: num # An explicit version number for concurrency control. It must be a non-negative long number., version_type: str # The version type., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active., require_alias: bool # If `true`, the destination must be an index alias., require_data_stream: bool # If `true`, the request's actions must target a data stream (existing or to be created).}
@returns(200) {_id: any, _index: any, _primary_term: num, result: any, _seq_no: any, _shards: any, _version: any, failure_store: any, forced_refresh: bool}
@example_request "{\n  \"@timestamp\": \"2099-11-15T13:12:00\",\n  \"message\": \"GET /search HTTP/1.1 200 1070000\",\n  \"user\": {\n    \"id\": \"kimchy\"\n  }\n}"

@endpoint PUT /{index}/_block/{block}
@desc Add an index block
@required {index: any # A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API., block: str # The block type to add to the index.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool, shards_acknowledged: bool, indices: [map]}

@endpoint DELETE /{index}/_block/{block}
@desc Remove an index block
@required {index: any # A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are removing blocks from. To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API., block: str # The block type to remove from the index.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool, indices: [map]}

@endgroup

@group _analyze
@endpoint GET /_analyze
@desc Get tokens from text analysis
@optional {index: str # Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer., analyzer: str # The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index., attributes: [str] # Array of token attributes used to filter the output of the `explain` parameter., char_filter: [any] # Array of character filters used to preprocess characters before the tokenizer., explain: bool=false # If `true`, the response includes token attributes and additional details., field: any # Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value., filter: [any] # Array of token filters used to apply after the tokenizer., normalizer: str # Normalizer to use to convert text into a single token., text: any # Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field., tokenizer: any # Tokenizer to use to convert text into tokens.}
@returns(200) {detail: any, tokens: [map]}
@example_request "{\n  \"analyzer\": \"standard\",\n  \"text\": \"this is a test\"\n}"

@endpoint POST /_analyze
@desc Get tokens from text analysis
@optional {index: str # Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer., analyzer: str # The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index., attributes: [str] # Array of token attributes used to filter the output of the `explain` parameter., char_filter: [any] # Array of character filters used to preprocess characters before the tokenizer., explain: bool=false # If `true`, the response includes token attributes and additional details., field: any # Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value., filter: [any] # Array of token filters used to apply after the tokenizer., normalizer: str # Normalizer to use to convert text into a single token., text: any # Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field., tokenizer: any # Tokenizer to use to convert text into tokens.}
@returns(200) {detail: any, tokens: [map]}
@example_request "{\n  \"analyzer\": \"standard\",\n  \"text\": \"this is a test\"\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_analyze
@desc Get tokens from text analysis
@required {index: str # Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.}
@optional {analyzer: str # The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index., attributes: [str] # Array of token attributes used to filter the output of the `explain` parameter., char_filter: [any] # Array of character filters used to preprocess characters before the tokenizer., explain: bool=false # If `true`, the response includes token attributes and additional details., field: any # Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value., filter: [any] # Array of token filters used to apply after the tokenizer., normalizer: str # Normalizer to use to convert text into a single token., text: any # Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field., tokenizer: any # Tokenizer to use to convert text into tokens.}
@returns(200) {detail: any, tokens: [map]}
@example_request "{\n  \"analyzer\": \"standard\",\n  \"text\": \"this is a test\"\n}"

@endpoint POST /{index}/_analyze
@desc Get tokens from text analysis
@required {index: str # Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.}
@optional {analyzer: str # The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index., attributes: [str] # Array of token attributes used to filter the output of the `explain` parameter., char_filter: [any] # Array of character filters used to preprocess characters before the tokenizer., explain: bool=false # If `true`, the response includes token attributes and additional details., field: any # Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value., filter: [any] # Array of token filters used to apply after the tokenizer., normalizer: str # Normalizer to use to convert text into a single token., text: any # Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field., tokenizer: any # Tokenizer to use to convert text into tokens.}
@returns(200) {detail: any, tokens: [map]}
@example_request "{\n  \"analyzer\": \"standard\",\n  \"text\": \"this is a test\"\n}"

@endgroup

@group _migration
@endpoint POST /_migration/reindex/{index}/_cancel
@desc Cancel a migration reindex operation
@required {index: any # The index or data stream name}
@returns(200) {acknowledged: bool}

@endgroup

@group _cache
@endpoint POST /_cache/clear
@desc Clear the cache
@optional {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fielddata: bool # If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only., fields: any # Comma-separated list of field names used to limit the `fielddata` parameter., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., query: bool # If `true`, clears the query cache., request: bool # If `true`, clears the request cache.}
@returns(200) {_shards: any}

@endgroup

@group {index}
@endpoint POST /{index}/_cache/clear
@desc Clear the cache
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fielddata: bool # If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only., fields: any # Comma-separated list of field names used to limit the `fielddata` parameter., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., query: bool # If `true`, clears the query cache., request: bool # If `true`, clears the request cache.}
@returns(200) {_shards: any}

@endpoint PUT /{index}/_clone/{target}
@desc Clone an index
@required {index: str # Name of the source index to clone., target: str # Name of the target index to create.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # Aliases for the resulting index., settings: map # Configuration options for the target index.}
@returns(200) {acknowledged: bool, index: any, shards_acknowledged: bool}
@example_request "{\n  \"settings\": {\n    \"index.refresh_interval\": \"2s\"\n  },\n  \"aliases\": {\n    \"my_search_indices\": {}\n  }\n}"

@endpoint POST /{index}/_clone/{target}
@desc Clone an index
@required {index: str # Name of the source index to clone., target: str # Name of the target index to create.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # Aliases for the resulting index., settings: map # Configuration options for the target index.}
@returns(200) {acknowledged: bool, index: any, shards_acknowledged: bool}
@example_request "{\n  \"settings\": {\n    \"index.refresh_interval\": \"2s\"\n  },\n  \"aliases\": {\n    \"my_search_indices\": {}\n  }\n}"

@endpoint POST /{index}/_close
@desc Close an index
@required {index: any # Comma-separated list or wildcard expression of index names used to limit the request.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).}
@returns(200) {acknowledged: bool, indices: map, shards_acknowledged: bool}

@endpoint GET /{index}
@desc Get index information
@required {index: any # Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden., flat_settings: bool # If true, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If true, return all default settings in the response., local: bool # If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., features: any # Return only information on specified index features}
@returns(200)

@endpoint PUT /{index}
@desc Create an index
@required {index: str # Name of the index you wish to create. Index names must meet the following criteria:  * Lowercase only * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot be `.` or `..` * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster) * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # Aliases for the index., mappings: any # Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters, settings: any # Configuration options for the index.}
@returns(200) {index: any, shards_acknowledged: bool, acknowledged: bool}
@example_request "{\n  \"settings\": {\n    \"number_of_shards\": 3,\n    \"number_of_replicas\": 2\n  }\n}"

@endpoint DELETE /{index}
@desc Delete indices
@required {index: any # Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint HEAD /{index}
@desc Check indices
@required {index: any # Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response., local: bool # If `true`, the request retrieves information from the local node only.}
@returns(200)

@endgroup

@group _data_stream
@endpoint GET /_data_stream/{name}
@desc Get data streams
@required {name: any # Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned.}
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`., include_defaults: bool # If true, returns all relevant default configurations for the index template., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., verbose: bool # Whether the maximum timestamp for each data stream should be calculated and returned.}
@returns(200) {data_streams: [map]}

@endpoint PUT /_data_stream/{name}
@desc Create a data stream
@required {name: str # Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_data_stream/{name}
@desc Delete data streams
@required {name: any # Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`.}
@returns(200) {acknowledged: bool}

@endgroup

@group _create_from
@endpoint PUT /_create_from/{source}/{dest}
@desc Create an index from a source index
@required {source: str # The source index or data stream name, dest: str # The destination index or data stream name}
@optional {mappings_override: any # Mappings overrides to be applied to the destination index (optional), settings_override: any # Settings overrides to be applied to the destination index (optional), remove_index_blocks: bool=true # If index blocks should be removed when creating destination index (optional)}
@returns(200) {acknowledged: bool, index: any, shards_acknowledged: bool}

@endpoint POST /_create_from/{source}/{dest}
@desc Create an index from a source index
@required {source: str # The source index or data stream name, dest: str # The destination index or data stream name}
@optional {mappings_override: any # Mappings overrides to be applied to the destination index (optional), settings_override: any # Settings overrides to be applied to the destination index (optional), remove_index_blocks: bool=true # If index blocks should be removed when creating destination index (optional)}
@returns(200) {acknowledged: bool, index: any, shards_acknowledged: bool}

@endgroup

@group _data_stream
@endpoint GET /_data_stream/_stats
@desc Get data stream stats
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`.}
@returns(200) {_shards: any, backing_indices: num, data_stream_count: num, data_streams: [map], total_store_sizes: any, total_store_size_bytes: num}

@endpoint GET /_data_stream/{name}/_stats
@desc Get data stream stats
@required {name: any # Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`.}
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`.}
@returns(200) {_shards: any, backing_indices: num, data_stream_count: num, data_streams: [map], total_store_sizes: any, total_store_size_bytes: num}

@endgroup

@group {index}
@endpoint GET /{index}/_alias/{name}
@desc Get aliases
@required {index: any # Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`., name: any # Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /{index}/_alias/{name}
@desc Create or update an alias
@required {index: any # Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error., name: str # Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., filter: any # Query used to limit documents the alias can access., index_routing: str # Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter., is_write_index: bool # If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream., routing: str # Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter., search_routing: str # Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"filter\": {\n    \"bool\": {\n      \"filter\": [\n        {\n          \"range\": {\n            \"@timestamp\": {\n              \"gte\": \"now-1d/d\",\n              \"lt\": \"now/d\"\n            }\n          }\n        },\n        {\n          \"term\": {\n            \"user.id\": \"kimchy\"\n          }\n        }\n      ]\n    }\n  }\n}"

@endpoint POST /{index}/_alias/{name}
@desc Create or update an alias
@required {index: any # Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error., name: str # Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., filter: any # Query used to limit documents the alias can access., index_routing: str # Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter., is_write_index: bool # If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream., routing: str # Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter., search_routing: str # Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"filter\": {\n    \"bool\": {\n      \"filter\": [\n        {\n          \"range\": {\n            \"@timestamp\": {\n              \"gte\": \"now-1d/d\",\n              \"lt\": \"now/d\"\n            }\n          }\n        },\n        {\n          \"term\": {\n            \"user.id\": \"kimchy\"\n          }\n        }\n      ]\n    }\n  }\n}"

@endpoint DELETE /{index}/_alias/{name}
@desc Delete an alias
@required {index: any # Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`)., name: any # Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint HEAD /{index}/_alias/{name}
@desc Check aliases
@required {index: any # Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`., name: any # Comma-separated list of aliases to check. Supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /{index}/_aliases/{name}
@desc Create or update an alias
@required {index: any # Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error., name: str # Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., filter: any # Query used to limit documents the alias can access., index_routing: str # Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter., is_write_index: bool # If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream., routing: str # Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter., search_routing: str # Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"filter\": {\n    \"bool\": {\n      \"filter\": [\n        {\n          \"range\": {\n            \"@timestamp\": {\n              \"gte\": \"now-1d/d\",\n              \"lt\": \"now/d\"\n            }\n          }\n        },\n        {\n          \"term\": {\n            \"user.id\": \"kimchy\"\n          }\n        }\n      ]\n    }\n  }\n}"

@endpoint POST /{index}/_aliases/{name}
@desc Create or update an alias
@required {index: any # Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error., name: str # Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., filter: any # Query used to limit documents the alias can access., index_routing: str # Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter., is_write_index: bool # If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream., routing: str # Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter., search_routing: str # Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"filter\": {\n    \"bool\": {\n      \"filter\": [\n        {\n          \"range\": {\n            \"@timestamp\": {\n              \"gte\": \"now-1d/d\",\n              \"lt\": \"now/d\"\n            }\n          }\n        },\n        {\n          \"term\": {\n            \"user.id\": \"kimchy\"\n          }\n        }\n      ]\n    }\n  }\n}"

@endpoint DELETE /{index}/_aliases/{name}
@desc Delete an alias
@required {index: any # Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`)., name: any # Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _data_stream
@endpoint GET /_data_stream/{name}/_lifecycle
@desc Get data stream lifecycles
@required {name: any # Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`.}
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`., include_defaults: bool # If `true`, return all default settings in the response., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {data_streams: [map]}

@endpoint PUT /_data_stream/{name}/_lifecycle
@desc Update data stream lifecycles
@required {name: any # Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`.}
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., data_retention: any # If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely., downsampling: [map{after!: any, fixed_interval!: any}] # The downsampling configuration to execute for the managed backing index after rollover., downsampling_method: any # The method used to downsample the data. There are two options `aggregate` and `last_value`. It requires `downsampling` to be defined. Defaults to `aggregate`., enabled: bool=true # If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"data_retention\": \"7d\"\n}"

@endpoint DELETE /_data_stream/{name}/_lifecycle
@desc Delete data stream lifecycles
@required {name: any # A comma-separated list of data streams of which the data stream lifecycle will be deleted. Use `*` to get all data streams}
@optional {expand_wildcards: any # Whether wildcard expressions should get expanded to open or closed indices (default: open), master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response.}
@returns(200) {acknowledged: bool}

@endpoint GET /_data_stream/{name}/_options
@desc Get data stream options
@required {name: any # Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`.}
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {data_streams: [map]}

@endpoint PUT /_data_stream/{name}/_options
@desc Update data stream options
@required {name: any # Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`.}
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., failure_store: any # If defined, it will update the failure store configuration of every data stream resolved by the name expression.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_data_stream/{name}/_options
@desc Delete data stream options
@required {name: any # A comma-separated list of data streams of which the data stream options will be deleted. Use `*` to get all data streams}
@optional {expand_wildcards: any # Whether wildcard expressions should get expanded to open or closed indices, master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response.}
@returns(200) {acknowledged: bool}

@endgroup

@group _index_template
@endpoint GET /_index_template/{name}
@desc Get index templates
@required {name: str # Name of index template to retrieve. Wildcard (*) expressions are supported.}
@optional {local: bool # If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node., flat_settings: bool # If true, returns settings in flat format., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., include_defaults: bool # If true, returns all relevant default configurations for the index template.}
@returns(200) {index_templates: [map]}

@endpoint PUT /_index_template/{name}
@desc Create or update an index template
@required {name: str # Index or template name}
@optional {create: bool # If `true`, this request cannot replace or update existing index templates., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., cause: str # User defined reason for creating or updating the index template, index_patterns: any # Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation., composed_of: [str] # An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence., template: any # Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration., data_stream: any # If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object., priority: num # Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one., _meta: any # Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. 
This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it., allow_auto_create: bool # This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created., ignore_missing_component_templates: [str] # The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist, deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index_patterns\" : [\"template*\"],\n  \"priority\" : 1,\n  \"template\": {\n    \"settings\" : {\n      \"number_of_shards\" : 2\n    }\n  }\n}"

@endpoint POST /_index_template/{name}
@desc Create or update an index template
@required {name: str # Index or template name}
@optional {create: bool # If `true`, this request cannot replace or update existing index templates., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., cause: str # User defined reason for creating or updating the index template, index_patterns: any # Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation., composed_of: [str] # An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence., template: any # Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration., data_stream: any # If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object., priority: num # Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one., _meta: any # Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. 
This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it., allow_auto_create: bool # This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created., ignore_missing_component_templates: [str] # The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist, deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index_patterns\" : [\"template*\"],\n  \"priority\" : 1,\n  \"template\": {\n    \"settings\" : {\n      \"number_of_shards\" : 2\n    }\n  }\n}"

@endpoint DELETE /_index_template/{name}
@desc Delete an index template
@required {name: any # Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint HEAD /_index_template/{name}
@desc Check index templates
@required {name: str # Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported.}
@optional {local: bool # If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node., flat_settings: bool # If true, returns settings in flat format., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _template
@endpoint GET /_template/{name}
@desc Get legacy index templates
@required {name: any # Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`.}
@optional {flat_settings: bool # If `true`, returns settings in flat format., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /_template/{name}
@desc Create or update a legacy index template
@required {name: str # The name of the template}
@optional {create: bool # If true, this request cannot replace or update existing index templates., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., order: num # Order in which Elasticsearch applies this template if index matches multiple templates.  Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values., cause: str # User defined reason for creating or updating the index template, aliases: map # Aliases for the index., index_patterns: any # Array of wildcard expressions used to match the names of indices during creation., mappings: any # Mapping for fields in the index., settings: any # Configuration options for the index., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index_patterns\": [\n    \"te*\",\n    \"bar*\"\n  ],\n  \"settings\": {\n    \"number_of_shards\": 1\n  },\n  \"mappings\": {\n    \"_source\": {\n      \"enabled\": false\n    },\n    \"properties\": {\n      \"host_name\": {\n        \"type\": \"keyword\"\n      },\n      \"created_at\": {\n        \"type\": \"date\",\n        \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n      }\n    }\n  }\n}"

@endpoint POST /_template/{name}
@desc Create or update a legacy index template
@required {name: str # The name of the template}
@optional {create: bool # If true, this request cannot replace or update existing index templates., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., order: num # Order in which Elasticsearch applies this template if index matches multiple templates.  Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values., cause: str # User defined reason for creating or updating the index template, aliases: map # Aliases for the index., index_patterns: any # Array of wildcard expressions used to match the names of indices during creation., mappings: any # Mapping for fields in the index., settings: any # Configuration options for the index., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index_patterns\": [\n    \"te*\",\n    \"bar*\"\n  ],\n  \"settings\": {\n    \"number_of_shards\": 1\n  },\n  \"mappings\": {\n    \"_source\": {\n      \"enabled\": false\n    },\n    \"properties\": {\n      \"host_name\": {\n        \"type\": \"keyword\"\n      },\n      \"created_at\": {\n        \"type\": \"date\",\n        \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\n      }\n    }\n  }\n}"

@endpoint DELETE /_template/{name}
@desc Delete a legacy index template
@required {name: str # The name of the legacy index template to delete. Wildcard (`*`) expressions are supported.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint HEAD /_template/{name}
@desc Check existence of index templates
@required {name: any # A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported.}
@optional {flat_settings: bool # Indicates whether to use a flat format for the response., local: bool # Indicates whether to get information from the local node only., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200)

@endgroup

@group {index}
@endpoint POST /{index}/_disk_usage
@desc Analyze the index disk usage
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flush: bool # If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., run_expensive_tasks: bool # Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`.}
@returns(200)

@endpoint POST /{index}/_downsample/{target_index}
@desc Downsample an index
@required {index: str # Name of the time series index to downsample., target_index: str # Name of the index to create., fixed_interval: any # The interval at which to aggregate the original time series index.}
@optional {sampling_method: any # The sampling method used to reduce the documents; it can be either `aggregate` or `last_value`. Defaults to `aggregate`.}
@returns(200)
@example_request "{\n  \"fixed_interval\": \"1d\"\n}"

@endgroup

@group _alias
@endpoint GET /_alias/{name}
@desc Get aliases
@required {name: any # Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint HEAD /_alias/{name}
@desc Check aliases
@required {name: any # Comma-separated list of aliases to check. Supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group {index}
@endpoint GET /{index}/_lifecycle/explain
@desc Get the status for a data stream lifecycle
@required {index: any # Comma-separated list of index names to explain}
@optional {include_defaults: bool # Indicates if the API should return the default values the system uses for the index's lifecycle, master_timeout: any # The period to wait for a connection to the master node.}
@returns(200) {indices: map}

@endpoint GET /{index}/_field_usage_stats
@desc Get field usage stats
@required {index: any # Comma-separated list or wildcard expression of index names used to limit the request.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics.}
@returns(200) {_shards: any}

@endgroup

@group _flush
@endpoint GET /_flush
@desc Flush data streams or indices
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., force: bool # If `true`, the request forces a flush even if there are no changes to commit to the index., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., wait_if_ongoing: bool # If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running.}
@returns(200) {_shards: any}

@endpoint POST /_flush
@desc Flush data streams or indices
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., force: bool # If `true`, the request forces a flush even if there are no changes to commit to the index., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., wait_if_ongoing: bool # If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running.}
@returns(200) {_shards: any}

@endgroup

@group {index}
@endpoint GET /{index}/_flush
@desc Flush data streams or indices
@required {index: any # Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., force: bool # If `true`, the request forces a flush even if there are no changes to commit to the index., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., wait_if_ongoing: bool # If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running.}
@returns(200) {_shards: any}

@endpoint POST /{index}/_flush
@desc Flush data streams or indices
@required {index: any # Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., force: bool # If `true`, the request forces a flush even if there are no changes to commit to the index., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., wait_if_ongoing: bool # If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running.}
@returns(200) {_shards: any}

@endgroup

@group _forcemerge
@endpoint POST /_forcemerge
@desc Force a merge
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., flush: bool # Specify whether the index should be flushed after performing the operation, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., max_num_segments: num # The number of segments the index should be merged into (default: dynamic), only_expunge_deletes: bool # Specify whether the operation should only expunge deleted documents, wait_for_completion: bool # Should the request wait until the force merge is completed}
@returns(200)

@endgroup

@group {index}
@endpoint POST /{index}/_forcemerge
@desc Force a merge
@required {index: any # A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., flush: bool # Specify whether the index should be flushed after performing the operation, ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., max_num_segments: num # The number of segments the index should be merged into (default: dynamic), only_expunge_deletes: bool # Specify whether the operation should only expunge deleted documents, wait_for_completion: bool # Should the request wait until the force merge is completed}
@returns(200)

@endgroup

@group _alias
@endpoint GET /_alias
@desc Get aliases
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group {index}
@endpoint GET /{index}/_alias
@desc Get aliases
@required {index: any # Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _lifecycle
@endpoint GET /_lifecycle/stats
@desc Get data stream lifecycle stats
@returns(200) {data_stream_count: num, data_streams: [map], last_run_duration_in_millis: any, time_between_starts_in_millis: any}

@endgroup

@group _data_stream
@endpoint GET /_data_stream
@desc Get data streams
@optional {expand_wildcards: any # Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`., include_defaults: bool # If true, returns all relevant default configurations for the index template., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., verbose: bool # Whether the maximum timestamp for each data stream should be calculated and returned.}
@returns(200) {data_streams: [map]}

@endpoint GET /_data_stream/{name}/_mappings
@desc Get data stream mappings
@required {name: any # A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`).}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {data_streams: [map]}

@endpoint PUT /_data_stream/{name}/_mappings
@desc Update data stream mappings
@required {name: any # A comma-separated list of data streams or data stream patterns.}
@optional {dry_run: bool # If `true`, the request does not actually change the mappings on any data streams. Instead, it simulates changing the settings and reports back to the user what would have happened had these settings actually been applied., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., all_field: any, date_detection: bool, dynamic: any, dynamic_date_formats: [str], dynamic_templates: [map], _field_names: any, index_field: any, _meta: any, numeric_detection: bool, properties: map, _routing: any, _size: any, _source: any, runtime: map, enabled: bool, subobjects: any, _data_stream_timestamp: any}
@returns(200) {data_streams: [map]}
@example_request "{\n   \"properties\":{\n      \"field1\":{\n         \"type\":\"ip\"\n      },\n      \"field3\":{\n         \"type\":\"text\"\n      }\n   }\n}"

@endpoint GET /_data_stream/{name}/_settings
@desc Get data stream settings
@required {name: any # A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`).}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {data_streams: [map]}

@endpoint PUT /_data_stream/{name}/_settings
@desc Update data stream settings
@required {name: any # A comma-separated list of data streams or data stream patterns.}
@optional {dry_run: bool # If `true`, the request does not actually change the settings on any data streams or indices. Instead, it simulates changing the settings and reports back to the user what would have happened had these settings actually been applied., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., index: any, mode: str, routing_path: any, soft_deletes: any, sort: any, number_of_shards: any=1, number_of_replicas: any=0, number_of_routing_shards: num, check_on_startup: any=false, codec: str=LZ4, routing_partition_size: any=1, load_fixed_bitset_filters_eagerly: bool=true, hidden: any=false, auto_expand_replicas: any=false, merge: any, search: any, refresh_interval: any=1s, max_result_window: num=10000, max_inner_result_window: num=100, max_rescore_window: num=10000, max_docvalue_fields_search: num=100, max_script_fields: num=32, max_ngram_diff: num=1, max_shingle_diff: num=3, blocks: any, max_refresh_listeners: num, analyze: any # Settings to define analyzers, tokenizers, token filters and character filters. Refer to the linked documentation for step-by-step examples of updating analyzers on existing indices., highlight: any, max_terms_count: num=65536, max_regex_length: num=1000, routing: any, gc_deletes: any=60s, default_pipeline: any=_none, final_pipeline: any=_none, lifecycle: any, provided_name: any, creation_date: any, creation_date_string: any, uuid: any, version: any, verified_before_close: any, format: any, max_slices_per_scroll: num, translog: any, query_string: any, priority: any, top_metrics_max_size: num, analysis: any, settings: any, time_series: any, queries: any, similarity: map # Configure custom similarity settings to customize how search results are scored., mapping: any # Enable or disable dynamic mapping for an index., indexing.slowlog: any, indexing_pressure: any # Configure indexing back pressure limits., store: any # The store module allows you to control how index data is stored and accessed on disk.}
@returns(200) {data_streams: [map]}
@example_request "{\n  \"index.lifecycle.name\" : \"new-test-policy\",\n  \"index.number_of_shards\": 11\n}"

@endgroup

@group _mapping
@endpoint GET /_mapping/field/{fields}
@desc Get mapping definitions
@required {fields: any # Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response.}
@returns(200)

@endgroup

@group {index}
@endpoint GET /{index}/_mapping/field/{fields}
@desc Get mapping definitions
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`., fields: any # Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response.}
@returns(200)

@endgroup

@group _index_template
@endpoint GET /_index_template
@desc Get index templates
@optional {local: bool # If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node., flat_settings: bool # If true, returns settings in flat format., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., include_defaults: bool # If true, returns all relevant default configurations for the index template.}
@returns(200) {index_templates: [map]}

@endgroup

@group _mapping
@endpoint GET /_mapping
@desc Get mapping definitions
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group {index}
@endpoint GET /{index}/_mapping
@desc Get mapping definitions
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /{index}/_mapping
@desc Update field mappings
@required {index: any # A comma-separated list of index names the mapping should be added to (supports wildcards). Use `_all` or omit to add the mapping on all indices.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., write_index_only: bool # If `true`, the mappings are applied only to the current write index for the target., date_detection: bool # Controls whether dynamic date detection is enabled., dynamic: any # Controls whether new fields are added dynamically., dynamic_date_formats: [str] # If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string., dynamic_templates: [map] # Specify dynamic templates for the mapping., _field_names: any # Control whether field names are enabled for the index., _meta: any # A mapping type can have custom metadata associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata., numeric_detection: bool=false # Automatically map strings into numeric data types for all fields., properties: map # Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters, _routing: any # Enable making a routing value required on indexed documents., _source: any # Control whether the _source field is enabled on the index., runtime: any # Mapping of runtime fields for the index.}
@returns(200)
@example_request "{\n  \"properties\": {\n    \"user\": {\n      \"properties\": {\n        \"name\": {\n          \"type\": \"keyword\"\n        }\n      }\n    }\n  }\n}"

@endpoint POST /{index}/_mapping
@desc Update field mappings
@required {index: any # A comma-separated list of index names the mapping should be added to (supports wildcards). Use `_all` or omit to add the mapping on all indices.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., write_index_only: bool # If `true`, the mappings are applied only to the current write index for the target., date_detection: bool # Controls whether dynamic date detection is enabled., dynamic: any # Controls whether new fields are added dynamically., dynamic_date_formats: [str] # If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string., dynamic_templates: [map] # Specify dynamic templates for the mapping., _field_names: any # Control whether field names are enabled for the index., _meta: any # A mapping type can have custom metadata associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata., numeric_detection: bool=false # Automatically map strings into numeric data types for all fields., properties: map # Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters, _routing: any # Enable making a routing value required on indexed documents., _source: any # Control whether the _source field is enabled on the index., runtime: any # Mapping of runtime fields for the index.}
@returns(200)
@example_request "{\n  \"properties\": {\n    \"user\": {\n      \"properties\": {\n        \"name\": {\n          \"type\": \"keyword\"\n        }\n      }\n    }\n  }\n}"

@endgroup

@group _migration
@endpoint GET /_migration/reindex/{index}/_status
@desc Get the migration reindexing status
@required {index: any # The index or data stream name.}
@returns(200) {start_time: any, start_time_millis: any, complete: bool, total_indices_in_data_stream: num, total_indices_requiring_upgrade: num, successes: num, in_progress: [map], pending: num, errors: [map], exception: str}

@endgroup

@group _settings
@endpoint GET /_settings
@desc Get index settings
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response., local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /_settings
@desc Update index settings
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., preserve_existing: bool # If `true`, existing index settings remain unchanged., reopen: bool # Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., index: any, mode: str, routing_path: any, soft_deletes: any, sort: any, number_of_shards: any=1, number_of_replicas: any=0, number_of_routing_shards: num, check_on_startup: any=false, codec: str=LZ4, routing_partition_size: any=1, load_fixed_bitset_filters_eagerly: bool=true, hidden: any=false, auto_expand_replicas: any=false, merge: any, search: any, refresh_interval: any=1s, max_result_window: num=10000, max_inner_result_window: num=100, max_rescore_window: num=10000, max_docvalue_fields_search: num=100, max_script_fields: num=32, max_ngram_diff: num=1, max_shingle_diff: num=3, blocks: any, max_refresh_listeners: num, analyze: any # Settings to define analyzers, tokenizers, token filters and character filters. Refer to the linked documentation for step-by-step examples of updating analyzers on existing indices., highlight: any, max_terms_count: num=65536, max_regex_length: num=1000, routing: any, gc_deletes: any=60s, default_pipeline: any=_none, final_pipeline: any=_none, lifecycle: any, provided_name: any, creation_date: any, creation_date_string: any, uuid: any, version: any, verified_before_close: any, format: any, max_slices_per_scroll: num, translog: any, query_string: any, priority: any, top_metrics_max_size: num, analysis: any, settings: any, time_series: any, queries: any, similarity: map # Configure custom similarity settings to customize how search results are scored., mapping: any # Enable or disable dynamic mapping for an index., indexing.slowlog: any, indexing_pressure: any # Configure indexing back pressure limits., store: any # The store module allows you to control how index data is stored and accessed on disk.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index\" : {\n    \"number_of_replicas\" : 2\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_settings
@desc Get index settings
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response., local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /{index}/_settings
@desc Update index settings
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., preserve_existing: bool # If `true`, existing index settings remain unchanged., reopen: bool # Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes., timeout: any # Period to wait for a response. 
If no response is received before the  timeout expires, the request fails and returns an error., index: any, mode: str, routing_path: any, soft_deletes: any, sort: any, number_of_shards: any=1, number_of_replicas: any=0, number_of_routing_shards: num, check_on_startup: any=false, codec: str=LZ4, routing_partition_size: any=1, load_fixed_bitset_filters_eagerly: bool=true, hidden: any=false, auto_expand_replicas: any=false, merge: any, search: any, refresh_interval: any=1s, max_result_window: num=10000, max_inner_result_window: num=100, max_rescore_window: num=10000, max_docvalue_fields_search: num=100, max_script_fields: num=32, max_ngram_diff: num=1, max_shingle_diff: num=3, blocks: any, max_refresh_listeners: num, analyze: any # Settings to define analyzers, tokenizers, token filters and character filters. Refer to the linked documentation for step-by-step examples of updating analyzers on existing indices., highlight: any, max_terms_count: num=65536, max_regex_length: num=1000, routing: any, gc_deletes: any=60s, default_pipeline: any=_none, final_pipeline: any=_none, lifecycle: any, provided_name: any, creation_date: any, creation_date_string: any, uuid: any, version: any, verified_before_close: any, format: any, max_slices_per_scroll: num, translog: any, query_string: any, priority: any, top_metrics_max_size: num, analysis: any, settings: any, time_series: any, queries: any, similarity: map # Configure custom similarity settings to customize how search results are scored., mapping: any # Enable or disable dynamic mapping for an index., indexing.slowlog: any, indexing_pressure: any # Configure indexing back pressure limits., store: any # The store module allows you to control how index data is stored and accessed on disk.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index\" : {\n    \"number_of_replicas\" : 2\n  }\n}"

@endpoint GET /{index}/_settings/{name}
@desc Get index settings
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`., name: any # Comma-separated list or wildcard expression of settings to retrieve.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response., local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _settings
@endpoint GET /_settings/{name}
@desc Get index settings
@required {name: any # Comma-separated list or wildcard expression of settings to retrieve.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., flat_settings: bool # If `true`, returns settings in flat format., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_defaults: bool # If `true`, return all default settings in the response., local: bool # If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _template
@endpoint GET /_template
@desc Get legacy index templates
@optional {flat_settings: bool # If `true`, returns settings in flat format., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _migration
@endpoint POST /_migration/reindex
@desc Reindex legacy backing indices
@required {mode: any # Reindex mode. Currently only 'upgrade' is supported., source: any # The source index or data stream (only data streams are currently supported).}
@returns(200) {acknowledged: bool}
@example_request "{\n    \"source\": {\n        \"index\": \"my-data-stream\"\n    },\n    \"mode\": \"upgrade\"\n}"

@endgroup

@group _data_stream
@endpoint POST /_data_stream/_migrate/{name}
@desc Convert an index alias to a data stream
@required {name: str # Name of the index alias to convert to a data stream.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint POST /_data_stream/_modify
@desc Update data streams
@required {actions: [map{add_backing_index: any, remove_backing_index: any}] # Actions to perform.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"actions\": [\n    {\n      \"remove_backing_index\": {\n        \"data_stream\": \"my-data-stream\",\n        \"index\": \".ds-my-data-stream-2023.07.26-000001\"\n      }\n    },\n    {\n      \"add_backing_index\": {\n        \"data_stream\": \"my-data-stream\",\n        \"index\": \".ds-my-data-stream-2023.07.26-000001-downsample\"\n      }\n    }\n  ]\n}"

@endgroup

@group {index}
@endpoint POST /{index}/_open
@desc Open a closed index
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you are using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).}
@returns(200) {acknowledged: bool, shards_acknowledged: bool}

@endgroup

@group _data_stream
@endpoint POST /_data_stream/_promote/{name}
@desc Promote a data stream
@required {name: str # The name of the data stream to promote.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group _recovery
@endpoint GET /_recovery
@desc Get index recovery information
@optional {active_only: bool # If `true`, the response only includes ongoing shard recoveries., detailed: bool # If `true`, the response includes detailed information about shard recoveries., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200)

@endgroup

@group {index}
@endpoint GET /{index}/_recovery
@desc Get index recovery information
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {active_only: bool # If `true`, the response only includes ongoing shard recoveries., detailed: bool # If `true`, the response includes detailed information about shard recoveries., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200)

@endgroup

@group _refresh
@endpoint GET /_refresh
@desc Refresh an index
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200) {_shards: any}

@endpoint POST /_refresh
@desc Refresh an index
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200) {_shards: any}

@endgroup

@group {index}
@endpoint GET /{index}/_refresh
@desc Refresh an index
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200) {_shards: any}

@endpoint POST /{index}/_refresh
@desc Refresh an index
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200) {_shards: any}

@endpoint GET /{index}/_reload_search_analyzers
@desc Reload search analyzers
@required {index: any # A comma-separated list of index names to reload analyzers for}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., resource: str # Changed resource to reload analyzers from if applicable}
@returns(200) {reload_details: [map], _shards: any}

@endpoint POST /{index}/_reload_search_analyzers
@desc Reload search analyzers
@required {index: any # A comma-separated list of index names to reload analyzers for}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., resource: str # Changed resource to reload analyzers from if applicable}
@returns(200) {reload_details: [map], _shards: any}

@endgroup

@group _resolve
@endpoint GET /_resolve/cluster
@desc Resolve the cluster
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., ignore_throttled: bool # If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., timeout: any # The maximum time to wait for remote clusters to respond. 
If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds).}
@returns(200)

@endpoint GET /_resolve/cluster/{name}
@desc Resolve the cluster
@required {name: any # A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster is returned without doing any index matching.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., ignore_throttled: bool # If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression., timeout: any # The maximum time to wait for remote clusters to respond. 
If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds).}
@returns(200)

@endpoint GET /_resolve/index/{name}
@desc Resolve indices
@required {name: any # Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.}
@optional {expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., mode: any # Filter indices by index mode - standard, lookup, time_series, etc. Comma-separated list of IndexMode. Empty means no filter.}
@returns(200) {indices: [map], aliases: [map], data_streams: [map]}

@endpoint POST /_resolve/index/{name}
@desc Resolve indices
@required {name: any # Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.}
@optional {expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., mode: any # Filter indices by index mode - standard, lookup, time_series, etc. Comma-separated list of IndexMode. Empty means no filter.}
@returns(200) {indices: [map], aliases: [map], data_streams: [map]}

@endgroup

@group {alias}
@endpoint POST /{alias}/_rollover
@desc Roll over to a new index
@required {alias: str # Name of the data stream or index alias to roll over.}
@optional {dry_run: bool # If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., lazy: bool # If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams., aliases: map # Aliases for the target index. Data streams do not support this parameter., conditions: any # Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied., mappings: any # Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters., settings: map # Configuration options for the index. Data streams do not support this parameter.}
@returns(200) {acknowledged: bool, conditions: map, dry_run: bool, new_index: str, old_index: str, rolled_over: bool, shards_acknowledged: bool}
@example_request "{\n  \"conditions\": {\n    \"max_age\": \"7d\",\n    \"max_docs\": 1000,\n    \"max_primary_shard_size\": \"50gb\",\n    \"max_primary_shard_docs\": \"2000\"\n  }\n}"

@endpoint POST /{alias}/_rollover/{new_index}
@desc Roll over to a new index
@required {alias: str # Name of the data stream or index alias to roll over., new_index: str # Name of the index to create. Supports date math. Data streams do not support this parameter.}
@optional {dry_run: bool # If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover., master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., lazy: bool # If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams., aliases: map # Aliases for the target index. Data streams do not support this parameter., conditions: any # Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied., mappings: any # Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters., settings: map # Configuration options for the index. Data streams do not support this parameter.}
@returns(200) {acknowledged: bool, conditions: map, dry_run: bool, new_index: str, old_index: str, rolled_over: bool, shards_acknowledged: bool}
@example_request "{\n  \"conditions\": {\n    \"max_age\": \"7d\",\n    \"max_docs\": 1000,\n    \"max_primary_shard_size\": \"50gb\",\n    \"max_primary_shard_docs\": \"2000\"\n  }\n}"

@endgroup

@group _segments
@endpoint GET /_segments
@desc Get index segments
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200) {indices: map, _shards: any}

@endgroup

@group {index}
@endpoint GET /{index}/_segments
@desc Get index segments
@required {index: any # Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200) {indices: map, _shards: any}

@endgroup

@group _shard_stores
@endpoint GET /_shard_stores
@desc Get index shard stores
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., status: any # List of shard health statuses used to limit the request.}
@returns(200) {indices: map}

@endgroup

@group {index}
@endpoint GET /{index}/_shard_stores
@desc Get index shard stores
@required {index: any # List of data streams, indices, and aliases used to limit the request.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., status: any # List of shard health statuses used to limit the request.}
@returns(200) {indices: map}

@endpoint PUT /{index}/_shrink/{target}
@desc Shrink an index
@required {index: str # Name of the source index to shrink., target: str # Name of the target index to create.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # The key is the alias name. Index alias names support date math., settings: map # Configuration options for the target index.}
@returns(200) {acknowledged: bool, shards_acknowledged: bool, index: any}
@example_request "{\n  \"settings\": {\n    \"index.routing.allocation.require._name\": null,\n    \"index.blocks.write\": null\n  }\n}"

@endpoint POST /{index}/_shrink/{target}
@desc Shrink an index
@required {index: str # Name of the source index to shrink., target: str # Name of the target index to create.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # The key is the alias name. Index alias names support date math., settings: map # Configuration options for the target index.}
@returns(200) {acknowledged: bool, shards_acknowledged: bool, index: any}
@example_request "{\n  \"settings\": {\n    \"index.routing.allocation.require._name\": null,\n    \"index.blocks.write\": null\n  }\n}"

@endgroup

@group _index_template
@endpoint POST /_index_template/_simulate_index/{name}
@desc Simulate an index
@required {name: str # Name of the index to simulate, index_patterns: any # Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation., composed_of: [str] # An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.}
@optional {create: bool # Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one, cause: str # User defined reason for dry-run creating the new template for simulation purposes, master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., include_defaults: bool # If true, returns all relevant default configurations for the index template., template: any # Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch., priority: num # Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch., _meta: any # Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch., allow_auto_create: bool, data_stream: any # If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object., deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning., ignore_missing_component_templates: any # A list of component template names that are allowed to be absent., created_date: any # Date and time when the index template was created. 
Only returned if the `human` query parameter is `true`., created_date_millis: any # Date and time when the index template was created, in milliseconds since the epoch., modified_date: any # Date and time when the index template was last modified. Only returned if the `human` query parameter is `true`., modified_date_millis: any # Date and time when the index template was last modified, in milliseconds since the epoch.}
@returns(200) {overlapping: [map], template: any}

@endpoint POST /_index_template/_simulate
@desc Simulate an index template
@optional {create: bool # If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation., cause: str # User defined reason for dry-run creating the new template for simulation purposes, master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., include_defaults: bool # If true, returns all relevant default configurations for the index template., allow_auto_create: bool # This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created., index_patterns: any # Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation., composed_of: [str] # An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence., template: any # Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration., data_stream: any # If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object., priority: num # Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. 
If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch., _meta: any # Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch., ignore_missing_component_templates: [str] # The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist., deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.}
@returns(200) {overlapping: [map], template: any}
@example_request "{\n  \"index_patterns\": [\"my-index-*\"],\n  \"composed_of\": [\"ct2\"],\n  \"priority\": 10,\n  \"template\": {\n    \"settings\": {\n      \"index.number_of_replicas\": 1\n    }\n  }\n}"

@endpoint POST /_index_template/_simulate/{name}
@desc Simulate an index template
@required {name: str # Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body.}
@optional {create: bool # If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation., cause: str # User defined reason for dry-run creating the new template for simulation purposes, master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., include_defaults: bool # If true, returns all relevant default configurations for the index template., allow_auto_create: bool # This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created., index_patterns: any # Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation., composed_of: [str] # An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence., template: any # Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration., data_stream: any # If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object., priority: num # Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. 
If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch., version: any # Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch., _meta: any # Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch., ignore_missing_component_templates: [str] # The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist., deprecated: bool # Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.}
@returns(200) {overlapping: [map], template: any}
@example_request "{\n  \"index_patterns\": [\"my-index-*\"],\n  \"composed_of\": [\"ct2\"],\n  \"priority\": 10,\n  \"template\": {\n    \"settings\": {\n      \"index.number_of_replicas\": 1\n    }\n  }\n}"

@endgroup

@group {index}
@endpoint PUT /{index}/_split/{target}
@desc Split an index
@required {index: str # Name of the source index to split., target: str # Name of the target index to create.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # Aliases for the resulting index., settings: map # Configuration options for the target index.}
@returns(200) {acknowledged: bool, shards_acknowledged: bool, index: any}
@example_request "{\n  \"settings\": {\n    \"index.number_of_shards\": 2\n  }\n}"

@endpoint POST /{index}/_split/{target}
@desc Split an index
@required {index: str # Name of the source index to split., target: str # Name of the target index to create.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`)., aliases: map # Aliases for the resulting index., settings: map # Configuration options for the target index.}
@returns(200) {acknowledged: bool, shards_acknowledged: bool, index: any}
@example_request "{\n  \"settings\": {\n    \"index.number_of_shards\": 2\n  }\n}"

@endgroup

@group _stats
@endpoint GET /_stats
@desc Get index statistics
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., forbid_closed_indices: bool # If true, statistics are not collected from closed indices., groups: any # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., level: str # Indicates whether statistics are aggregated at the cluster, indices, or shards level.}
@returns(200) {indices: map, _shards: any, _all: any}

@endpoint GET /_stats/{metric}
@desc Get index statistics
@required {metric: any # Comma-separated list of metrics used to limit the request.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., forbid_closed_indices: bool # If true, statistics are not collected from closed indices., groups: any # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., level: str # Indicates whether statistics are aggregated at the cluster, indices, or shards level.}
@returns(200) {indices: map, _shards: any, _all: any}

@endgroup

@group {index}
@endpoint GET /{index}/_stats
@desc Get index statistics
@required {index: any # A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., forbid_closed_indices: bool # If true, statistics are not collected from closed indices., groups: any # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., level: str # Indicates whether statistics are aggregated at the cluster, indices, or shards level.}
@returns(200) {indices: map, _shards: any, _all: any}

@endpoint GET /{index}/_stats/{metric}
@desc Get index statistics
@required {index: any # A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices, metric: any # Comma-separated list of metrics used to limit the request.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., forbid_closed_indices: bool # If true, statistics are not collected from closed indices., groups: any # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., include_unloaded_segments: bool # If true, the response includes information from segments that are not loaded into memory., level: str # Indicates whether statistics are aggregated at the cluster, indices, or shards level.}
@returns(200) {indices: map, _shards: any, _all: any}

@endgroup

@group _aliases
@endpoint POST /_aliases
@desc Create or update an alias
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., actions: [map{add: any, remove: any, remove_index: any}] # Actions to perform.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"actions\": [\n    {\n      \"add\": {\n        \"index\": \"logs-nginx.access-prod\",\n        \"alias\": \"logs\"\n      }\n    }\n  ]\n}"

@endgroup

@group _validate
@endpoint GET /_validate/query
@desc Validate a query
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., all_shards: bool # If `true`, the validation is executed on all shards instead of one random shard per index., analyzer: str # Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed., default_operator: str # The default operator for query string query: `and` or `or`., df: str # Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response returns detailed information if an error has occurred., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. 
If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored., rewrite: bool # If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed., q: str # Query in the Lucene query string syntax., query: any # Query in the Lucene query string syntax.}
@returns(200) {explanations: [map], _shards: any, valid: bool, error: str}

@endpoint POST /_validate/query
@desc Validate a query
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., all_shards: bool # If `true`, the validation is executed on all shards instead of one random shard per index., analyzer: str # Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed., default_operator: str # The default operator for query string query: `and` or `or`., df: str # Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response returns detailed information if an error has occurred., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. 
If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored., rewrite: bool # If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed., q: str # Query in the Lucene query string syntax., query: any # Query in the Lucene query string syntax.}
@returns(200) {explanations: [map], _shards: any, valid: bool, error: str}

@endgroup

@group {index}
@endpoint GET /{index}/_validate/query
@desc Validate a query
@required {index: any # Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., all_shards: bool # If `true`, the validation is executed on all shards instead of one random shard per index., analyzer: str # Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed., default_operator: str # The default operator for query string query: `and` or `or`., df: str # Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response returns detailed information if an error has occurred., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. 
If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored., rewrite: bool # If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed., q: str # Query in the Lucene query string syntax., query: any # Query in the Lucene query string syntax.}
@returns(200) {explanations: [map], _shards: any, valid: bool, error: str}

@endpoint POST /{index}/_validate/query
@desc Validate a query
@required {index: any # Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., all_shards: bool # If `true`, the validation is executed on all shards instead of one random shard per index., analyzer: str # Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed., default_operator: str # The default operator for query string query: `and` or `or`., df: str # Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response returns detailed information if an error has occurred., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. 
If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored., rewrite: bool # If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed., q: str # Query in the Lucene query string syntax., query: any # Query in the Lucene query string syntax.}
@returns(200) {explanations: [map], _shards: any, valid: bool, error: str}

@endgroup

@group _inference
@endpoint POST /_inference/chat_completion/{inference_id}/_stream
@desc Perform chat completion inference on the service
@required {inference_id: str # The inference Id, messages: [map{content: any, role!: str, tool_call_id: any, tool_calls: [map], reasoning: str, reasoning_details: [any]}] # A list of objects representing the conversation. Requests should generally only add new messages from the user (role `user`). The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation.}
@optional {timeout: any # Specifies the amount of time to wait for the inference request to complete., model: str # The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint., max_completion_tokens: num # The upper bound limit for the number of tokens that can be generated for a completion request., reasoning: any # The reasoning configuration for the completion request. This controls the model's reasoning process in one of two ways:  * By specifying the model’s reasoning effort level with the `effort` field. * By enabling reasoning with default settings by setting `enabled` field to `true`.  It also includes optional settings to control:  * The level of detail in the summary returned in the response with the `summary` field. * Whether reasoning details are included in the response at all with the `exclude` field.  Example (effort): ``` {    "reasoning": {        "effort": "high",        "summary": "concise",        "exclude": false    } } ``` Example (enabled): ``` {    "reasoning": {        "enabled": true,        "summary": "concise",        "exclude": false    } } ``` Currently supported only for `elastic` provider., stop: [str] # A sequence of strings to control when the model should stop generating additional tokens., temperature: num # The sampling temperature to use., tool_choice: any # Controls which tool is called by the model. String representation: One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools. Example (object representation): ``` {   "tool_choice": {       "type": "function",       "function": {           "name": "get_current_weather"       }   } } ```, tools: [map{type!: str, function!: any}] # A list of tools that the model can call. 
Example: ``` {   "tools": [       {           "type": "function",           "function": {               "name": "get_price_of_item",               "description": "Get the current price of an item",               "parameters": {                   "type": "object",                   "properties": {                       "item": {                           "id": "12345"                       },                       "unit": {                           "type": "currency"                       }                   }               }           }       }   ] } ```, top_p: num # Nucleus sampling, an alternative to sampling with temperature.}
@returns(200)
@example_request "{\n  \"model\": \"gpt-4o\",\n  \"messages\": [\n      {\n          \"role\": \"user\",\n          \"content\": \"What is Elastic?\"\n      }\n  ]\n}"

@endpoint POST /_inference/completion/{inference_id}
@desc Perform completion inference on the service
@required {inference_id: str # The inference Id, input: any # Inference input. Either a string or an array of strings.}
@optional {timeout: any # Specifies the amount of time to wait for the inference request to complete., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {completion: [map]}
@example_request "{\n  \"input\": \"What is Elastic?\"\n}"

@endpoint GET /_inference/{inference_id}
@desc Get an inference endpoint
@required {inference_id: str # The inference Id of the endpoint to return. Using `_all` or `*` will return all endpoints with the specified `task_type` if one is specified, or all endpoints for all task types if no `task_type` is specified}
@returns(200) {endpoints: [any]}

@endpoint PUT /_inference/{inference_id}
@desc Create an inference endpoint
@required {inference_id: str # The inference Id, service: str # The service type, service_settings: any # Settings specific to the service}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `embedding`, `sparse_embedding` and `text_embedding` task types. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Task settings specific to the service and task type}
@returns(200)
@example_request "{\n \"service\": \"cohere\",\n \"service_settings\": {\n   \"model_id\": \"rerank-english-v3.0\",\n   \"api_key\": \"{{COHERE_API_KEY}}\"\n }\n}"

@endpoint POST /_inference/{inference_id}
@desc Perform inference on the service
@required {inference_id: str # The unique identifier for the inference endpoint., input: any # The text on which you want to perform the inference task. It can be a single string or an array.  > info > Inference endpoints for the `completion` task type currently only support a single string as input.}
@optional {timeout: any # The amount of time to wait for the inference request to complete., query: str # The query input, which is required only for the `rerank` task. It is not required for other tasks., input_type: str # Specifies the input data type for the embedding model. The `input_type` parameter only applies to Inference Endpoints with the `embedding` or `text_embedding` task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` * `CLUSTERING` Not all services support all values. Unsupported values will trigger a validation exception. Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.  > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {embeddings_bytes: [map], embeddings_bits: [map], embeddings: [map], text_embedding_bytes: [map], text_embedding_bits: [map], text_embedding: [map], sparse_embedding: [map], completion: [map], rerank: [map]}

@endpoint DELETE /_inference/{inference_id}
@desc Delete an inference endpoint
@required {inference_id: str # The inference identifier.}
@optional {dry_run: bool # When true, checks the semantic_text fields and inference processors that reference the endpoint and returns them in a list, but does not delete the endpoint., force: bool # When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.}
@returns(200)

@endpoint GET /_inference/{task_type}/{inference_id}
@desc Get an inference endpoint
@required {task_type: str # The task type of the endpoint to return, inference_id: str # The inference Id of the endpoint to return. Using `_all` or `*` will return all endpoints with the specified `task_type` if one is specified, or all endpoints for all task types if no `task_type` is specified}
@returns(200) {endpoints: [any]}

@endpoint PUT /_inference/{task_type}/{inference_id}
@desc Create an inference endpoint
@required {task_type: str # The task type. Refer to the integration list in the API description for the available task types., inference_id: str # The inference Id, service: str # The service type, service_settings: any # Settings specific to the service}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `embedding`, `sparse_embedding` and `text_embedding` task types. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Task settings specific to the service and task type}
@returns(200)
@example_request "{\n \"service\": \"cohere\",\n \"service_settings\": {\n   \"model_id\": \"rerank-english-v3.0\",\n   \"api_key\": \"{{COHERE_API_KEY}}\"\n }\n}"

@endpoint POST /_inference/{task_type}/{inference_id}
@desc Perform inference on the service
@required {task_type: str # The type of inference task that the model performs., inference_id: str # The unique identifier for the inference endpoint., input: any # The text on which you want to perform the inference task. It can be a single string or an array.  > info > Inference endpoints for the `completion` task type currently only support a single string as input.}
@optional {timeout: any # The amount of time to wait for the inference request to complete., query: str # The query input, which is required only for the `rerank` task. It is not required for other tasks., input_type: str # Specifies the input data type for the embedding model. The `input_type` parameter only applies to Inference Endpoints with the `embedding` or `text_embedding` task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` * `CLUSTERING` Not all services support all values. Unsupported values will trigger a validation exception. Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.  > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {embeddings_bytes: [map], embeddings_bits: [map], embeddings: [map], text_embedding_bytes: [map], text_embedding_bits: [map], text_embedding: [map], sparse_embedding: [map], completion: [map], rerank: [map]}

@endpoint DELETE /_inference/{task_type}/{inference_id}
@desc Delete an inference endpoint
@required {task_type: str # The task type, inference_id: str # The inference identifier.}
@optional {dry_run: bool # When true, checks the semantic_text fields and inference processors that reference the endpoint and returns them in a list, but does not delete the endpoint., force: bool # When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.}
@returns(200)

@endpoint POST /_inference/embedding/{inference_id}
@desc Perform dense embedding inference on the service
@required {inference_id: str # The inference Id, input: any # Inference input. Either a string, an array of strings, a `content` object, or an array of `content` objects.  string example: ``` "input": "Some text" ``` string array example: ``` "input": ["Some text", "Some more text"] ``` `content` object example: ``` "input": {     "content": {       "type": "image",       "format": "base64",       "value": "data:image/jpeg;base64,..."     }   } ``` `content` object array example: ``` "input": [   {     "content": {       "type": "text",       "format": "text",       "value": "Some text to generate an embedding"     }   },   {     "content": {       "type": "image",       "format": "base64",       "value": "data:image/jpeg;base64,..."     }   } ] ```}
@optional {timeout: any # Specifies the amount of time to wait for the inference request to complete., input_type: str # The input data type for the embedding model. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` * `CLUSTERING`  Not all models support all values. Unsupported values will trigger a validation exception. Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.  > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {embeddings_bytes: [map], embeddings_bits: [map], embeddings: [map]}
@example_request "{\n  \"input\": [\n      {\n          \"content\": {\n              \"type\": \"image\",\n              \"format\": \"base64\",\n              \"value\": \"data:image/jpeg;base64,...\"\n          }\n      },\n      {\n          \"content\": {\n              \"type\": \"text\",\n              \"value\": \"Some text to create an embedding\"\n          }\n      }\n  ]\n}"

@endpoint GET /_inference
@desc Get an inference endpoint
@returns(200) {endpoints: [any]}

@endpoint GET /_inference/{task_type}/_all
@desc Get an inference endpoint
@required {task_type: str # The task type of the endpoint to return}
@returns(200) {endpoints: [any]}

@endpoint PUT /_inference/{task_type}/{ai21_inference_id}
@desc Create an AI21 inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., ai21_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `ai21`., service_settings: any # Settings used to install the inference model. These settings are specific to the `ai21` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created.}
@returns(200)
@example_request "{\n  \"service\": \"ai21\",\n  \"service_settings\": {\n    \"api_key\": \"ai21-api-key\",\n    \"model_id\": \"jamba-large\" \n  }\n}"

@endpoint PUT /_inference/{task_type}/{alibabacloud_inference_id}
@desc Create an AlibabaCloud AI Search inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., alibabacloud_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`., service_settings: any # Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `sparse_embedding` or `text_embedding` task types. Not applicable to the `rerank` or `completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"alibabacloud-ai-search\",\n    \"service_settings\": {\n        \"host\" : \"default-j01.platform-cn-shanghai.opensearch.aliyuncs.com\",\n        \"api_key\": \"AlibabaCloud-API-Key\",\n        \"service_id\": \"ops-qwen-turbo\",\n        \"workspace\" : \"default\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{amazonbedrock_inference_id}
@desc Create an Amazon Bedrock inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., amazonbedrock_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `amazonbedrock`., service_settings: any # Settings used to install the inference model. These settings are specific to the `amazonbedrock` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `chat_completion` and `completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"amazonbedrock\",\n    \"service_settings\": {\n        \"access_key\": \"AWS-access-key\",\n        \"secret_key\": \"AWS-secret-key\",\n        \"region\": \"us-east-1\",\n        \"provider\": \"amazontitan\",\n        \"model\": \"amazon.titan-embed-text-v2:0\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{amazonsagemaker_inference_id}
@desc Create an Amazon SageMaker inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., amazonsagemaker_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `amazon_sagemaker`., service_settings: any # Settings used to install the inference model. These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `sparse_embedding` or `text_embedding` task types. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type and `service_settings.api` you specified.}
@returns(200)
@example_request "{\n    \"service\": \"amazon_sagemaker\",\n    \"service_settings\": {\n        \"access_key\": \"AWS-access-key\",\n        \"secret_key\": \"AWS-secret-key\",\n        \"region\": \"us-east-1\",\n        \"api\": \"elastic\",\n        \"endpoint_name\": \"my-endpoint\",\n        \"dimensions\": 384,\n        \"element_type\": \"float\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{anthropic_inference_id}
@desc Create an Anthropic inference endpoint
@required {task_type: str # The task type. The only valid task type for the model to perform is `completion`., anthropic_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `anthropic`., service_settings: any # Settings used to install the inference model. These settings are specific to the `anthropic` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"anthropic\",\n    \"service_settings\": {\n        \"api_key\": \"Anthropic-Api-Key\",\n        \"model_id\": \"Model-ID\"\n    },\n    \"task_settings\": {\n        \"max_tokens\": 1024\n    }\n}"

@endpoint PUT /_inference/{task_type}/{azureaistudio_inference_id}
@desc Create an Azure AI studio inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., azureaistudio_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `azureaistudio`., service_settings: any # Settings used to install the inference model. These settings are specific to the `azureaistudio` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank` or `completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"azureaistudio\",\n    \"service_settings\": {\n        \"api_key\": \"Azure-AI-Studio-API-key\",\n        \"target\": \"Target-Uri\",\n        \"provider\": \"openai\",\n        \"endpoint_type\": \"token\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{azureopenai_inference_id}
@desc Create an Azure OpenAI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API., azureopenai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `azureopenai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `azureopenai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `completion` and `chat_completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"azureopenai\",\n    \"service_settings\": {\n        \"api_key\": \"Api-Key\",\n        \"resource_name\": \"Resource-name\",\n        \"deployment_id\": \"Deployment-id\",\n        \"api_version\": \"2024-02-01\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{cohere_inference_id}
@desc Create a Cohere inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., cohere_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `cohere`., service_settings: any # Settings used to install the inference model. These settings are specific to the `cohere` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank` or `completion` task type., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"cohere\",\n    \"service_settings\": {\n        \"api_key\": \"Cohere-Api-key\",\n        \"model_id\": \"embed-english-light-v3.0\",\n        \"embedding_type\": \"byte\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{contextualai_inference_id}
@desc Create a Contextual AI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., contextualai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `contextualai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `contextualai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"contextualai\",\n    \"service_settings\": {\n        \"api_key\": \"ContextualAI-Api-key\",\n        \"model_id\": \"ctxl-rerank-v2-instruct-multilingual-mini\"\n    },\n    \"task_settings\": {\n        \"instruction\": \"Rerank the following documents based on their relevance to the query.\",\n        \"top_k\": 3\n    }\n}"

@endpoint PUT /_inference/{task_type}/{custom_inference_id}
@desc Create a custom inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., custom_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `custom`., service_settings: any # Settings used to install the inference model. These settings are specific to the `custom` service.}
@optional {chunking_settings: any # The chunking configuration object. Applies only to the `sparse_embedding` or `text_embedding` task types. Not applicable to the `rerank` or `completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"custom\",\n    \"service_settings\": {\n        \"secret_parameters\": {\n            \"api_key\": \"<api key>\"\n        },\n        \"url\": \"https://api.openai.com/v1/embeddings\",\n        \"headers\": {\n            \"Authorization\": \"Bearer ${api_key}\",\n            \"Content-Type\": \"application/json;charset=utf-8\"\n        },\n        \"request\": \"{\\\"input\\\": ${input}, \\\"model\\\": \\\"text-embedding-3-small\\\"}\",\n        \"response\": {\n            \"json_parser\": {\n                \"text_embeddings\": \"$.data[*].embedding[*]\"\n            }\n        }\n    }\n}"

@endpoint PUT /_inference/{task_type}/{deepseek_inference_id}
@desc Create a DeepSeek inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., deepseek_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `deepseek`., service_settings: any # Settings used to install the inference model. These settings are specific to the `deepseek` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created.}
@returns(200)

@endpoint PUT /_inference/{task_type}/{elasticsearch_inference_id}
@desc Create an Elasticsearch inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., elasticsearch_inference_id: str # The unique identifier of the inference endpoint. It must not match the `model_id`., service: any # The type of service supported for the specified task type. In this case, `elasticsearch`., service_settings: any # Settings used to install the inference model. These settings are specific to the `elasticsearch` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `sparse_embedding` and `text_embedding` task types. Not applicable to the `rerank` task type., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"elasticsearch\",\n    \"service_settings\": {\n        \"adaptive_allocations\": { \n        \"enabled\": true,\n        \"min_number_of_allocations\": 1,\n        \"max_number_of_allocations\": 4\n        },\n        \"num_threads\": 1,\n        \"model_id\": \".elser_model_2\" \n    }\n}"

@endpoint PUT /_inference/{task_type}/{elser_inference_id}
@desc Create an ELSER inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., elser_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `elser`., service_settings: any # Settings used to install the inference model. These settings are specific to the `elser` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Note that for ELSER endpoints, the max_chunk_size may not exceed `300`.}
@returns(200)
@example_request "{\n    \"service\": \"elser\",\n    \"service_settings\": {\n        \"num_allocations\": 1,\n        \"num_threads\": 1\n    }\n}"

@endpoint PUT /_inference/{task_type}/{fireworksai_inference_id}
@desc Create a Fireworks AI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., fireworksai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `fireworksai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `fireworksai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `completion` or `chat_completion` task types., task_settings: any # Settings to configure the inference task. Applies only to the `completion` or `chat_completion` task types. Not applicable to the `text_embedding` task type. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"fireworksai\",\n    \"service_settings\": {\n        \"api_key\": \"your-api-key\",\n        \"model_id\": \"fireworks/qwen3-embedding-8b\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{googleaistudio_inference_id}
@desc Create a Google AI Studio inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., googleaistudio_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `googleaistudio`., service_settings: any # Settings used to install the inference model. These settings are specific to the `googleaistudio` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `completion` task type.}
@returns(200)
@example_request "{\n    \"service\": \"googleaistudio\",\n    \"service_settings\": {\n        \"api_key\": \"api-key\",\n        \"model_id\": \"model-id\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{googlevertexai_inference_id}
@desc Create a Google Vertex AI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., googlevertexai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `googlevertexai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `googlevertexai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"googlevertexai\",\n    \"service_settings\": {\n        \"service_account_json\": \"service-account-json\",\n        \"model_id\": \"model-id\",\n        \"location\": \"location\",\n        \"project_id\": \"project-id\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{groq_inference_id}
@desc Create a Groq inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., groq_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `groq`., service_settings: any # Settings used to install the inference model. These settings are specific to the `groq` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created.}
@returns(200)
@example_request "{\n  \"service\": \"groq\",\n  \"service_settings\": {\n    \"api_key\": \"groq-api-key\",\n    \"model_id\": \"llama-3.3-70b-versatile\" \n  }\n}"

@endpoint PUT /_inference/{task_type}/{huggingface_inference_id}
@desc Create a Hugging Face inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., huggingface_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `hugging_face`., service_settings: any # Settings used to install the inference model. These settings are specific to the `hugging_face` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"hugging_face\",\n    \"service_settings\": {\n        \"api_key\": \"hugging-face-access-token\", \n        \"url\": \"url-endpoint\" \n    }\n}"

@endpoint PUT /_inference/{task_type}/{jinaai_inference_id}
@desc Create a JinaAI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., jinaai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `jinaai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `jinaai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `embedding` and `text_embedding` task types. Not applicable to the `rerank` task type., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"jinaai\",\n    \"service_settings\": {\n        \"model_id\": \"jina-embeddings-v3\",\n        \"api_key\": \"JinaAi-Api-key\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{llama_inference_id}
@desc Create a Llama inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., llama_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `llama`., service_settings: any # Settings used to install the inference model. These settings are specific to the `llama` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `completion` or `chat_completion` task types.}
@returns(200)
@example_request "{\n  \"service\": \"llama\",\n  \"service_settings\": {\n    \"url\": \"http://localhost:8321/v1/inference/embeddings\",\n    \"dimensions\": 384,\n    \"model_id\": \"all-MiniLM-L6-v2\" \n  }\n}"

@endpoint PUT /_inference/{task_type}/{mistral_inference_id}
@desc Create a Mistral inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., mistral_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `mistral`., service_settings: any # Settings used to install the inference model. These settings are specific to the `mistral` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `completion` or `chat_completion` task types.}
@returns(200)
@example_request "{\n  \"service\": \"mistral\",\n  \"service_settings\": {\n    \"api_key\": \"Mistral-API-Key\",\n    \"model\": \"mistral-embed\" \n  }\n}"

@endpoint PUT /_inference/{task_type}/{nvidia_inference_id}
@desc Create an Nvidia inference endpoint
@required {task_type: str # The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API., nvidia_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `nvidia`., service_settings: any # Settings used to install the inference model. These settings are specific to the `nvidia` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Settings to configure the inference task. Applies only to the `text_embedding` task type. Not applicable to the `rerank`, `completion`, or `chat_completion` task types. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"nvidia\",\n    \"service_settings\": {\n        \"url\": \"nvidia-embeddings-url\",\n        \"api_key\": \"nvidia-embeddings-token\",\n        \"model_id\": \"nvidia/llama-3.2-nv-embedqa-1b-v2\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{openai_inference_id}
@desc Create an OpenAI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API., openai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `openai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `openai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `completion` or `chat_completion` task types., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"openai\",\n    \"service_settings\": {\n        \"api_key\": \"OpenAI-API-Key\",\n        \"model_id\": \"text-embedding-3-small\",\n        \"dimensions\": 128\n    }\n}"

@endpoint PUT /_inference/{task_type}/{openshiftai_inference_id}
@desc Create an OpenShift AI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API., openshiftai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `openshift_ai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `openshift_ai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Settings to configure the inference task. Applies only to the `rerank` task type. Not applicable to the `text_embedding`, `completion`, or `chat_completion` task types. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"openshift_ai\",\n    \"service_settings\": {\n        \"url\": \"openshift-ai-embeddings-url\",\n        \"api_key\": \"openshift-ai-embeddings-token\",\n        \"model_id\": \"gritlm-7b\"\n    }\n}"

@endpoint PUT /_inference/{task_type}/{voyageai_inference_id}
@desc Create a VoyageAI inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., voyageai_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `voyageai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `voyageai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank` task type., task_settings: any # Settings to configure the inference task. These settings are specific to the task type you specified.}
@returns(200)
@example_request "{\n    \"service\": \"voyageai\",\n    \"service_settings\": {\n        \"model_id\": \"voyage-3-large\",\n        \"dimensions\": 512\n    }\n}"

@endpoint PUT /_inference/{task_type}/{watsonx_inference_id}
@desc Create a Watsonx inference endpoint
@required {task_type: str # The type of the inference task that the model will perform., watsonx_inference_id: str # The unique identifier of the inference endpoint., service: any # The type of service supported for the specified task type. In this case, `watsonxai`., service_settings: any # Settings used to install the inference model. These settings are specific to the `watsonxai` service.}
@optional {timeout: any # Specifies the amount of time to wait for the inference endpoint to be created., chunking_settings: any # The chunking configuration object. Applies only to the `text_embedding` task type. Not applicable to the `rerank`, `completion` or `chat_completion` task types.}
@returns(200)
@example_request "{\n  \"service\": \"watsonxai\",\n  \"service_settings\": {\n      \"api_key\": \"Watsonx-API-Key\", \n      \"url\": \"Wastonx-URL\", \n      \"model_id\": \"ibm/slate-30m-english-rtrvr\",\n      \"project_id\": \"IBM-Cloud-ID\", \n      \"api_version\": \"2024-03-14\"\n  }\n}"

@endpoint POST /_inference/rerank/{inference_id}
@desc Perform reranking inference on the service
@required {inference_id: str # The unique identifier for the inference endpoint., query: str # Query input., input: [str] # The documents to rank.}
@optional {timeout: any # The amount of time to wait for the inference request to complete., return_documents: bool # Include the document text in the response., top_n: num # Limit the response to the top N documents., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {rerank: [map]}
@example_request "{\n  \"input\": [\"luke\", \"like\", \"leia\", \"chewy\",\"r2d2\", \"star\", \"wars\"],\n  \"query\": \"star wars main character\"\n}"

@endpoint POST /_inference/sparse_embedding/{inference_id}
@desc Perform sparse embedding inference on the service
@required {inference_id: str # The inference Id, input: any # Inference input. Either a string or an array of strings.}
@optional {timeout: any # Specifies the amount of time to wait for the inference request to complete., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {sparse_embedding: [map]}
@example_request "{\n  \"input\": \"The sky above the port was the color of television tuned to a dead channel.\"\n}"

@endpoint POST /_inference/completion/{inference_id}/_stream
@desc Perform streaming completion inference on the service
@required {inference_id: str # The unique identifier for the inference endpoint., input: any # The text on which you want to perform the inference task. It can be a single string or an array.  NOTE: Inference endpoints for the completion task type currently only support a single string as input.}
@optional {timeout: any # The amount of time to wait for the inference request to complete., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200)
@example_request "{\n  \"input\": \"What is Elastic?\"\n}"

@endpoint POST /_inference/text_embedding/{inference_id}
@desc Perform text embedding inference on the service
@required {inference_id: str # The inference Id, input: any # Inference input. Either a string or an array of strings.}
@optional {timeout: any # Specifies the amount of time to wait for the inference request to complete., input_type: str # The input data type for the text embedding model. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` * `CLUSTERING` Not all services support all values. Unsupported values will trigger a validation exception. Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info.  > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`., task_settings: any # Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.}
@returns(200) {text_embedding_bytes: [map], text_embedding_bits: [map], text_embedding: [map]}
@example_request "{\n  \"input\": \"The sky above the port was the color of television tuned to a dead channel.\",\n  \"input_type\": \"ingest\"\n}"

@endpoint PUT /_inference/{inference_id}/_update
@desc Update an inference endpoint
@required {inference_id: str # The unique identifier of the inference endpoint., service: str # The service type, service_settings: any # Settings specific to the service}
@optional {chunking_settings: any # The chunking configuration object. Applies only to the `embedding`, `sparse_embedding` and `text_embedding` task types. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Task settings specific to the service and task type}
@returns(200)
@example_request "{\n \"service_settings\": {\n   \"api_key\": \"<API_KEY>\"\n },\n\"service\": \"example-service\"\n}"

@endpoint PUT /_inference/{task_type}/{inference_id}/_update
@desc Update an inference endpoint
@required {task_type: str # The type of inference task that the model performs., inference_id: str # The unique identifier of the inference endpoint., service: str # The service type, service_settings: any # Settings specific to the service}
@optional {chunking_settings: any # The chunking configuration object. Applies only to the `embedding`, `sparse_embedding` and `text_embedding` task types. Not applicable to the `rerank`, `completion`, or `chat_completion` task types., task_settings: any # Task settings specific to the service and task type}
@returns(200)
@example_request "{\n \"service_settings\": {\n   \"api_key\": \"<API_KEY>\"\n },\n\"service\": \"example-service\"\n}"

@endgroup

@group root
@endpoint GET /
@desc Get cluster info
@returns(200) {cluster_name: any, cluster_uuid: any, name: any, tagline: str, version: any}

@endpoint HEAD /
@desc Ping the cluster
@returns(200)

@endgroup

@group _ingest
@endpoint GET /_ingest/geoip/database/{id}
@desc Get GeoIP database configurations
@required {id: any # A comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`.}
@returns(200) {databases: [map]}

@endpoint PUT /_ingest/geoip/database/{id}
@desc Create or update a GeoIP database configuration
@required {id: str # ID of the database configuration to create or update., name: any # The provider-assigned name of the IP geolocation database to download., maxmind: any # The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_ingest/geoip/database/{id}
@desc Delete GeoIP database configurations
@required {id: any # A comma-separated list of geoip database configurations to delete}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ingest/ip_location/database/{id}
@desc Get IP geolocation database configurations
@required {id: any # Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`.}
@returns(200) {databases: [map]}

@endpoint PUT /_ingest/ip_location/database/{id}
@desc Create or update an IP geolocation database configuration
@required {id: str # The database configuration identifier.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"name\": \"GeoIP2-Domain\",\n  \"maxmind\": {\n    \"account_id\": \"1234567\"\n  }\n}"

@endpoint DELETE /_ingest/ip_location/database/{id}
@desc Delete IP geolocation database configurations
@required {id: any # A comma-separated list of IP location database configurations.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ingest/pipeline/{id}
@desc Get pipelines
@required {id: str # Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., summary: bool # Return pipelines without their definitions}
@returns(200)

@endpoint PUT /_ingest/pipeline/{id}
@desc Create or update a pipeline
@required {id: str # ID of the ingest pipeline to create or update.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., if_version: num # Required version for optimistic concurrency control for pipeline updates, _meta: any # Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch., description: str # Description of the ingest pipeline., on_failure: [map{append: any, attachment: any, bytes: any, cef: any, circle: any, community_id: any, convert: any, csv: any, date: any, date_index_name: any, dissect: any, dot_expander: any, drop: any, enrich: any, fail: any, fingerprint: any, foreach: any, ip_location: any, geo_grid: any, geoip: any, grok: any, gsub: any, html_strip: any, inference: any, join: any, json: any, kv: any, lowercase: any, network_direction: any, pipeline: any, redact: any, registered_domain: any, remove: any, rename: any, reroute: any, script: any, set: any, set_security_user: any, sort: any, split: any, terminate: any, trim: any, uppercase: any, urldecode: any, uri_parts: any, user_agent: any}] # Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. 
Elasticsearch will not attempt to run the pipeline's remaining processors., processors: [map{append: any, attachment: any, bytes: any, cef: any, circle: any, community_id: any, convert: any, csv: any, date: any, date_index_name: any, dissect: any, dot_expander: any, drop: any, enrich: any, fail: any, fingerprint: any, foreach: any, ip_location: any, geo_grid: any, geoip: any, grok: any, gsub: any, html_strip: any, inference: any, join: any, json: any, kv: any, lowercase: any, network_direction: any, pipeline: any, redact: any, registered_domain: any, remove: any, rename: any, reroute: any, script: any, set: any, set_security_user: any, sort: any, split: any, terminate: any, trim: any, uppercase: any, urldecode: any, uri_parts: any, user_agent: any}] # Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified., version: any # Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers., deprecated: bool=false # Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning., field_access_pattern: any=classic # Controls how processors in this pipeline should read and write data on a document's source.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"description\" : \"My optional pipeline description\",\n  \"processors\" : [\n    {\n      \"set\" : {\n        \"description\" : \"My optional processor description\",\n        \"field\": \"my-keyword-field\",\n        \"value\": \"foo\"\n      }\n    }\n  ]\n}"

@endpoint DELETE /_ingest/pipeline/{id}
@desc Delete pipelines
@required {id: str # Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`.}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ingest/geoip/stats
@desc Get GeoIP statistics
@returns(200) {stats: any, nodes: map}

@endpoint GET /_ingest/geoip/database
@desc Get GeoIP database configurations
@returns(200) {databases: [map]}

@endpoint GET /_ingest/ip_location/database
@desc Get IP geolocation database configurations
@returns(200) {databases: [map]}

@endpoint GET /_ingest/pipeline
@desc Get pipelines
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., summary: bool # Return pipelines without their definitions}
@returns(200)

@endpoint GET /_ingest/processor/grok
@desc Run a grok processor
@returns(200) {patterns: map}

@endpoint GET /_ingest/pipeline/_simulate
@desc Simulate a pipeline
@required {docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {verbose: bool # If `true`, the response includes output data for each processor in the executed pipeline., pipeline: any # The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"pipeline\" :\n  {\n    \"description\": \"_description\",\n    \"processors\": [\n      {\n        \"set\" : {\n          \"field\" : \"field2\",\n          \"value\" : \"_value\"\n        }\n      }\n    ]\n  },\n  \"docs\": [\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endpoint POST /_ingest/pipeline/_simulate
@desc Simulate a pipeline
@required {docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {verbose: bool # If `true`, the response includes output data for each processor in the executed pipeline., pipeline: any # The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"pipeline\" :\n  {\n    \"description\": \"_description\",\n    \"processors\": [\n      {\n        \"set\" : {\n          \"field\" : \"field2\",\n          \"value\" : \"_value\"\n        }\n      }\n    ]\n  },\n  \"docs\": [\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endpoint GET /_ingest/pipeline/{id}/_simulate
@desc Simulate a pipeline
@required {id: str # The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required., docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {verbose: bool # If `true`, the response includes output data for each processor in the executed pipeline., pipeline: any # The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"pipeline\" :\n  {\n    \"description\": \"_description\",\n    \"processors\": [\n      {\n        \"set\" : {\n          \"field\" : \"field2\",\n          \"value\" : \"_value\"\n        }\n      }\n    ]\n  },\n  \"docs\": [\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endpoint POST /_ingest/pipeline/{id}/_simulate
@desc Simulate a pipeline
@required {id: str # The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required., docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {verbose: bool # If `true`, the response includes output data for each processor in the executed pipeline., pipeline: any # The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"pipeline\" :\n  {\n    \"description\": \"_description\",\n    \"processors\": [\n      {\n        \"set\" : {\n          \"field\" : \"field2\",\n          \"value\" : \"_value\"\n        }\n      }\n    ]\n  },\n  \"docs\": [\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_index\": \"index\",\n      \"_id\": \"id\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endgroup

@group _license
@endpoint GET /_license
@desc Get license information
@optional {accept_enterprise: bool # If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x., local: bool # Specifies whether to retrieve local information. From 9.2 onwards the default value is `true`, which means the information is retrieved from the responding node. In earlier versions the default is `false`, which means the information is retrieved from the elected master node.}
@returns(200) {license: any}

@endpoint PUT /_license
@desc Update the license
@optional {acknowledge: bool # To update a license, you must accept the acknowledge messages and set this parameter to `true`. In particular, if you are upgrading or downgrading a license, you must acknowledge the feature changes., master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., license: any, licenses: [map{expiry_date_in_millis!: any, issue_date_in_millis!: any, start_date_in_millis: any, issued_to!: str, issuer!: str, max_nodes: any, max_resource_units: num, signature!: str, type!: any, uid!: str}] # A sequence of one or more JSON documents containing the license information.}
@returns(200) {acknowledge: any, acknowledged: bool, license_status: any}
@example_request "{\n  \"licenses\": [\n    {\n      \"uid\":\"893361dc-9749-4997-93cb-802e3d7fa4xx\",\n      \"type\":\"basic\",\n      \"issue_date_in_millis\":1411948800000,\n      \"expiry_date_in_millis\":1914278399999,\n      \"max_nodes\":1,\n      \"issued_to\":\"issuedTo\",\n      \"issuer\":\"issuer\",\n      \"signature\":\"xx\"\n    }\n    ]\n}"

@endpoint POST /_license
@desc Update the license
@optional {acknowledge: bool # To update a license, you must accept the acknowledge messages and set this parameter to `true`. In particular, if you are upgrading or downgrading a license, you must acknowledge the feature changes., master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., license: any, licenses: [map{expiry_date_in_millis!: any, issue_date_in_millis!: any, start_date_in_millis: any, issued_to!: str, issuer!: str, max_nodes: any, max_resource_units: num, signature!: str, type!: any, uid!: str}] # A sequence of one or more JSON documents containing the license information.}
@returns(200) {acknowledge: any, acknowledged: bool, license_status: any}
@example_request "{\n  \"licenses\": [\n    {\n      \"uid\":\"893361dc-9749-4997-93cb-802e3d7fa4xx\",\n      \"type\":\"basic\",\n      \"issue_date_in_millis\":1411948800000,\n      \"expiry_date_in_millis\":1914278399999,\n      \"max_nodes\":1,\n      \"issued_to\":\"issuedTo\",\n      \"issuer\":\"issuer\",\n      \"signature\":\"xx\"\n    }\n    ]\n}"

@endpoint DELETE /_license
@desc Delete the license
@optional {master_timeout: any # The period to wait for a connection to the master node., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_license/basic_status
@desc Get the basic license status
@returns(200) {eligible_to_start_basic: bool}

@endpoint GET /_license/trial_status
@desc Get the trial status
@returns(200) {eligible_to_start_trial: bool}

@endpoint POST /_license/start_basic
@desc Start a basic license
@optional {acknowledge: bool # To start a basic license, you must accept the acknowledge messages and set this parameter to `true`., master_timeout: any # Period to wait for a connection to the master node., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool, basic_was_started: bool, error_message: str, type: any, acknowledge: map}

@endpoint POST /_license/start_trial
@desc Start a trial
@optional {acknowledge: bool # To start a trial, you must accept the acknowledge messages and set this parameter to `true`., type: str # The type of trial license to generate, master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {acknowledged: bool, error_message: str, trial_was_started: bool, type: any}

@endgroup

@group _logstash
@endpoint GET /_logstash/pipeline/{id}
@desc Get Logstash pipelines
@required {id: any # A comma-separated list of pipeline identifiers.}
@returns(200)

@endpoint PUT /_logstash/pipeline/{id}
@desc Create or update a Logstash pipeline
@required {id: str # An identifier for the pipeline. Pipeline IDs must begin with a letter or underscore and contain only letters, underscores, dashes, and numbers., description: str # A description of the pipeline. This description is not used by Elasticsearch or Logstash., last_modified: any # The date the pipeline was last updated. It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format., pipeline: str # The configuration for the pipeline., pipeline_metadata: any # Optional metadata about the pipeline, which can have any contents. This metadata is not generated or used by Elasticsearch or Logstash., pipeline_settings: any # Settings for the pipeline. It supports only flat keys in dot notation., username: str # The user who last updated the pipeline.}
@returns(200)
@example_request "{\n  \"description\": \"Sample pipeline for illustration purposes\",\n  \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n  \"pipeline_metadata\": {\n    \"type\": \"logstash_pipeline\",\n    \"version\": 1\n  },\n  \"username\": \"elastic\",\n  \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n  \"pipeline_settings\": {\n    \"pipeline.workers\": 1,\n    \"pipeline.batch.size\": 125,\n    \"pipeline.batch.delay\": 50,\n    \"queue.type\": \"memory\",\n    \"queue.max_bytes\": \"1gb\",\n    \"queue.checkpoint.writes\": 1024\n  }\n}"

@endpoint DELETE /_logstash/pipeline/{id}
@desc Delete a Logstash pipeline
@required {id: str # An identifier for the pipeline.}
@returns(200)

@endpoint GET /_logstash/pipeline
@desc Get Logstash pipelines
@returns(200)

@endgroup

@group _mget
@endpoint GET /_mget
@desc Get multiple documents
@optional {preference: str # Specifies the node or shard the operation should be performed on. Random by default., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes relevant shards before retrieving documents., routing: any # Custom value used to route operations to a specific shard., _source: any # True or false to return the `_source` field or not, or a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # If `true`, retrieves the document fields stored in the index rather than the document `_source`., docs: [map{_id!: any, _index: any, routing: any, _source: any, stored_fields: any, version: any, version_type: any}] # The documents you want to retrieve. Required if no index is specified in the request URI., ids: any # The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.}
@returns(200) {docs: [any]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"1\"\n    },\n    {\n      \"_id\": \"2\"\n    }\n  ]\n}"

@endpoint POST /_mget
@desc Get multiple documents
@optional {preference: str # Specifies the node or shard the operation should be performed on. Random by default., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes relevant shards before retrieving documents., routing: any # Custom value used to route operations to a specific shard., _source: any # True or false to return the `_source` field or not, or a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # If `true`, retrieves the document fields stored in the index rather than the document `_source`., docs: [map{_id!: any, _index: any, routing: any, _source: any, stored_fields: any, version: any, version_type: any}] # The documents you want to retrieve. Required if no index is specified in the request URI., ids: any # The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.}
@returns(200) {docs: [any]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"1\"\n    },\n    {\n      \"_id\": \"2\"\n    }\n  ]\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_mget
@desc Get multiple documents
@required {index: str # Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.}
@optional {preference: str # Specifies the node or shard the operation should be performed on. Random by default., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes relevant shards before retrieving documents., routing: any # Custom value used to route operations to a specific shard., _source: any # True or false to return the `_source` field or not, or a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # If `true`, retrieves the document fields stored in the index rather than the document `_source`., docs: [map{_id!: any, _index: any, routing: any, _source: any, stored_fields: any, version: any, version_type: any}] # The documents you want to retrieve. Required if no index is specified in the request URI., ids: any # The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.}
@returns(200) {docs: [any]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"1\"\n    },\n    {\n      \"_id\": \"2\"\n    }\n  ]\n}"

@endpoint POST /{index}/_mget
@desc Get multiple documents
@required {index: str # Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.}
@optional {preference: str # Specifies the node or shard the operation should be performed on. Random by default., realtime: bool # If `true`, the request is real-time as opposed to near-real-time., refresh: bool # If `true`, the request refreshes relevant shards before retrieving documents., routing: any # Custom value used to route operations to a specific shard., _source: any # True or false to return the `_source` field or not, or a list of fields to return., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter., _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., stored_fields: any # If `true`, retrieves the document fields stored in the index rather than the document `_source`., docs: [map{_id!: any, _index: any, routing: any, _source: any, stored_fields: any, version: any, version_type: any}] # The documents you want to retrieve. Required if no index is specified in the request URI., ids: any # The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.}
@returns(200) {docs: [any]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"1\"\n    },\n    {\n      \"_id\": \"2\"\n    }\n  ]\n}"

@endgroup

@group _migration
@endpoint GET /_migration/deprecations
@desc Get deprecation information
@returns(200) {cluster_settings: [map], index_settings: map, data_streams: map, node_settings: [map], ml_settings: [map], templates: map, ilm_policies: map}

@endgroup

@group {index}
@endpoint GET /{index}/_migration/deprecations
@desc Get deprecation information
@required {index: str # Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.}
@returns(200) {cluster_settings: [map], index_settings: map, data_streams: map, node_settings: [map], ml_settings: [map], templates: map, ilm_policies: map}

@endgroup

@group _migration
@endpoint GET /_migration/system_features
@desc Get feature migration information
@returns(200) {features: [map], migration_status: any}

@endpoint POST /_migration/system_features
@desc Start the feature migration
@returns(200) {accepted: bool, features: [map], reason: str}

@endgroup

@group _ml
@endpoint POST /_ml/trained_models/{model_id}/deployment/cache/_clear
@desc Clear trained model deployment cache
@required {model_id: str # The unique identifier of the trained model.}
@returns(200) {cleared: bool}

@endpoint POST /_ml/anomaly_detectors/{job_id}/_close
@desc Close anomaly detection jobs
@required {job_id: str # Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.}
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches., force: bool # Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots. If you want the job to be in a consistent state after the close job API returns, do not set to `true`. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future., timeout: any # Controls the time to wait until a job has closed., allow_no_match: bool=true # Refer to the description for the `allow_no_match` query parameter., force: bool=false # Refer to the description for the `force` query parameter., timeout: any=30m # Refer to the description for the `timeout` query parameter.}
@returns(200) {closed: bool}

@endpoint GET /_ml/calendars/{calendar_id}
@desc Get calendar configuration info
@required {calendar_id: str # A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.}
@optional {from: num # Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier., size: num # Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier., page: any # This object is supported only when you omit the calendar identifier.}
@returns(200) {calendars: [map], count: num}

@endpoint PUT /_ml/calendars/{calendar_id}
@desc Create a calendar
@required {calendar_id: str # A string that uniquely identifies a calendar.}
@optional {job_ids: [str] # An array of anomaly detection job identifiers., description: str # A description of the calendar.}
@returns(200) {calendar_id: any, description: str, job_ids: any}

@endpoint POST /_ml/calendars/{calendar_id}
@desc Get calendar configuration info
@required {calendar_id: str # A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.}
@optional {from: num # Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier., size: num # Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier., page: any # This object is supported only when you omit the calendar identifier.}
@returns(200) {calendars: [map], count: num}

@endpoint DELETE /_ml/calendars/{calendar_id}
@desc Delete a calendar
@required {calendar_id: str # A string that uniquely identifies a calendar.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_ml/calendars/{calendar_id}/events/{event_id}
@desc Delete events from a calendar
@required {calendar_id: str # A string that uniquely identifies a calendar., event_id: str # Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API.}
@returns(200) {acknowledged: bool}

@endpoint PUT /_ml/calendars/{calendar_id}/jobs/{job_id}
@desc Add anomaly detection job to calendar
@required {calendar_id: str # A string that uniquely identifies a calendar., job_id: any # An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups.}
@returns(200) {calendar_id: any, description: str, job_ids: any}

@endpoint DELETE /_ml/calendars/{calendar_id}/jobs/{job_id}
@desc Delete anomaly jobs from a calendar
@required {calendar_id: str # A string that uniquely identifies a calendar., job_id: any # An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups.}
@returns(200) {calendar_id: any, description: str, job_ids: any}

@endpoint GET /_ml/data_frame/analytics/{id}
@desc Get data frame analytics job configuration info
@required {id: str # Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of data frame analytics jobs., size: num # Specifies the maximum number of data frame analytics jobs to obtain., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, data_frame_analytics: [map]}

@endpoint PUT /_ml/data_frame/analytics/{id}
@desc Create a data frame analytics job
@required {id: str # Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters., analysis: any # The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression., dest: any # The destination configuration., source: any # The configuration of how to source the analysis data.}
@optional {allow_lazy_start: bool=false # Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting., analyzed_fields: any # Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. 
It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on., description: str # A description of the job., max_num_threads: num=1 # The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself., _meta: any, model_memory_limit: str=1gb # The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting., headers: any, version: any}
@returns(200) {authorization: any, allow_lazy_start: bool, analysis: any, analyzed_fields: any, create_time: any, description: str, dest: any, id: any, max_num_threads: num, _meta: any, model_memory_limit: str, source: any, version: any}
@example_request "{\n  \"source\": {\n    \"index\": [\n      \"kibana_sample_data_flights\"\n    ],\n    \"query\": {\n      \"range\": {\n        \"DistanceKilometers\": {\n          \"gt\": 0\n        }\n      }\n    },\n    \"_source\": {\n      \"includes\": [],\n      \"excludes\": [\n        \"FlightDelay\",\n        \"FlightDelayType\"\n      ]\n    }\n  },\n  \"dest\": {\n    \"index\": \"df-flight-delays\",\n    \"results_field\": \"ml-results\"\n  },\n  \"analysis\": {\n  \"regression\": {\n    \"dependent_variable\": \"FlightDelayMin\",\n    \"training_percent\": 90\n    }\n  },\n  \"analyzed_fields\": {\n    \"includes\": [],\n    \"excludes\": [\n      \"FlightNum\"\n    ]\n  },\n  \"model_memory_limit\": \"100mb\"\n}"

@endpoint DELETE /_ml/data_frame/analytics/{id}
@desc Delete a data frame analytics job
@required {id: str # Identifier for the data frame analytics job.}
@optional {force: bool # If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job., timeout: any # The time to wait for the job to be deleted.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ml/datafeeds/{datafeed_id}
@desc Get datafeeds configuration info
@required {datafeed_id: any # Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, datafeeds: [map]}

@endpoint PUT /_ml/datafeeds/{datafeed_id}
@desc Create a datafeed
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values., ignore_throttled: bool # If true, concrete, expanded, or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., aggregations: map # If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data., chunking_config: any # Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option., delayed_data_check_config: any # Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. 
This check runs only on real-time datafeeds., frequency: any # The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation., indices: any # An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role., indices_options: any # Specifies index expansion options that are used during search, job_id: any # Identifier for the anomaly detection job., max_empty_searches: num # If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set., query: any={"match_all": {"boost": 1}} # The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch., query_delay: any # The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. 
This randomness improves the query performance when there are multiple jobs running on the same node., runtime_mappings: any # Specifies runtime fields for the datafeed search., script_fields: map # Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields., scroll_size: num=1000 # The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default., headers: any}
@returns(200) {aggregations: map, authorization: any, chunking_config: any, delayed_data_check_config: any, datafeed_id: any, frequency: any, indices: [str], job_id: any, indices_options: any, max_empty_searches: num, query: any, query_delay: any, runtime_mappings: any, script_fields: map, scroll_size: num}
@example_request "{\n  \"indices\": [\n    \"kibana_sample_data_logs\"\n  ],\n  \"query\": {\n    \"bool\": {\n      \"must\": [\n        {\n          \"match_all\": {}\n        }\n      ]\n    }\n  },\n  \"job_id\": \"test-job\"\n}"

@endpoint DELETE /_ml/datafeeds/{datafeed_id}
@desc Delete a datafeed
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {force: bool # Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_ml/_delete_expired_data/{job_id}
@desc Delete expired ML data
@required {job_id: str # Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression.}
@optional {requests_per_second: num # The desired requests per second for the deletion processes. The default behavior is no throttling., timeout: any=8h # How long can the underlying delete processes run until they are canceled.}
@returns(200) {deleted: bool}

@endpoint DELETE /_ml/_delete_expired_data
@desc Delete expired ML data
@optional {requests_per_second: num # The desired requests per second for the deletion processes. The default behavior is no throttling., timeout: any=8h # How long can the underlying delete processes run until they are canceled.}
@returns(200) {deleted: bool}

@endpoint GET /_ml/filters/{filter_id}
@desc Get filters
@required {filter_id: any # A string that uniquely identifies a filter.}
@optional {from: num # Skips the specified number of filters., size: num # Specifies the maximum number of filters to obtain.}
@returns(200) {count: num, filters: [map]}

@endpoint PUT /_ml/filters/{filter_id}
@desc Create a filter
@required {filter_id: str # A string that uniquely identifies a filter.}
@optional {description: str # A description of the filter., items: [str] # The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter.}
@returns(200) {description: str, filter_id: any, items: [str]}
@example_request "{\n  \"description\": \"A list of safe domains\",\n  \"items\": [\"*.google.com\", \"wikipedia.org\"]\n}"

@endpoint DELETE /_ml/filters/{filter_id}
@desc Delete a filter
@required {filter_id: str # A string that uniquely identifies a filter.}
@returns(200) {acknowledged: bool}

@endpoint POST /_ml/anomaly_detectors/{job_id}/_forecast
@desc Predict future behavior of a time series
@required {job_id: str # Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs.}
@optional {duration: any=1d # A period of time that indicates how far into the future to forecast. For example, `30d` corresponds to 30 days. The forecast starts at the last record that was processed., expires_in: any=14d # The period of time that forecast results are retained. After a forecast expires, the results are deleted. If set to a value of 0, the forecast is never automatically deleted., max_model_memory: str=20mb # The maximum memory the forecast can use. If the forecast needs to use more than the provided amount, it will spool to disk. Default is 20mb, maximum is 500mb and minimum is 1mb. If set to 40% or more of the job’s configured memory limit, it is automatically reduced to below that amount.}
@returns(200) {acknowledged: bool, forecast_id: any}
@example_request "{\n  \"duration\": \"10d\"\n}"

@endpoint DELETE /_ml/anomaly_detectors/{job_id}/_forecast
@desc Delete forecasts from a job
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {allow_no_forecasts: bool # Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error., timeout: any # Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_ml/anomaly_detectors/{job_id}/_forecast/{forecast_id}
@desc Delete forecasts from a job
@required {job_id: str # Identifier for the anomaly detection job., forecast_id: str # A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job.}
@optional {allow_no_forecasts: bool # Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error., timeout: any # Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ml/anomaly_detectors/{job_id}
@desc Get anomaly detection jobs configuration info
@required {job_id: any # Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, jobs: [map]}

@endpoint PUT /_ml/anomaly_detectors/{job_id}
@desc Create an anomaly detection job
@required {job_id: str # The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters., analysis_config: any # Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational., data_description: any # Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values., ignore_throttled: bool # If `true`, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., allow_lazy_open: bool=false # Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available., analysis_limits: any # Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. 
They do not control the memory used by other processes, for example the Elasticsearch Java processes., background_persist_interval: any # Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low., custom_settings: any # Advanced configuration option. Contains custom meta data about the job., daily_model_snapshot_retention_after_days: num=1 # Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`., datafeed_config: any # Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead., description: str # A description of the job., job_id: any # The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters., groups: [str] # A list of job groups. A job can belong to no groups or many., model_plot_config: any # This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. 
Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced., model_snapshot_retention_days: num=10 # Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted., renormalization_window_days: num # Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans., results_index_name: any=shared # A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`., results_retention_days: num # Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever.}
@returns(200) {allow_lazy_open: bool, analysis_config: any, analysis_limits: any, background_persist_interval: any, create_time: any, custom_settings: any, daily_model_snapshot_retention_after_days: num, data_description: any, datafeed_config: any, description: str, groups: [str], job_id: any, job_type: str, job_version: str, model_plot_config: any, model_snapshot_id: any, model_snapshot_retention_days: num, renormalization_window_days: num, results_index_name: str, results_retention_days: num}
@example_request "{\n  \"analysis_config\": {\n    \"bucket_span\": \"15m\",\n    \"detectors\": [\n      {\n        \"detector_description\": \"Sum of bytes\",\n        \"function\": \"sum\",\n        \"field_name\": \"bytes\"\n      }\n    ]\n  },\n  \"data_description\": {\n    \"time_field\": \"timestamp\",\n    \"time_format\": \"epoch_ms\"\n  },\n  \"analysis_limits\": {\n    \"model_memory_limit\": \"11MB\"\n  },\n  \"model_plot_config\": {\n    \"enabled\": true,\n    \"annotations_enabled\": true\n  },\n  \"results_index_name\": \"test-job1\",\n  \"datafeed_config\": {\n    \"indices\": [\n      \"kibana_sample_data_logs\"\n    ],\n    \"query\": {\n      \"bool\": {\n        \"must\": [\n          {\n            \"match_all\": {}\n          }\n        ]\n      }\n    },\n    \"runtime_mappings\": {\n      \"hour_of_day\": {\n        \"type\": \"long\",\n        \"script\": {\n          \"source\": \"emit(doc['timestamp'].value.getHour());\"\n        }\n      }\n    },\n    \"datafeed_id\": \"datafeed-test-job1\"\n  }\n}"

@endpoint DELETE /_ml/anomaly_detectors/{job_id}
@desc Delete an anomaly detection job
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {force: bool # Use to forcefully delete an opened job; this method is quicker than closing and deleting the job., delete_user_annotations: bool # Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset., wait_for_completion: bool # Specifies whether the request should return immediately or wait until the job deletion completes.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}
@desc Get model snapshots info
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.}
@optional {desc: bool=false # If true, the results are sorted in descending order., end: any # Returns snapshots with timestamps earlier than this time., from: num # Skips the specified number of snapshots., size: num # Specifies the maximum number of snapshots to obtain., sort: str # Specifies the sort field for the requested snapshots. By default, the snapshots are sorted by their timestamp., start: any # Returns snapshots with timestamps after this time., page: any}
@returns(200) {count: num, model_snapshots: [map]}
@example_request "{\n  \"start\": \"1575402236000\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}
@desc Get model snapshots info
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.}
@optional {desc: bool=false # If true, the results are sorted in descending order., end: any # Returns snapshots with timestamps earlier than this time., from: num # Skips the specified number of snapshots., size: num # Specifies the maximum number of snapshots to obtain., sort: str # Specifies the sort field for the requested snapshots. By default, the snapshots are sorted by their timestamp., start: any # Returns snapshots with timestamps after this time., page: any}
@returns(200) {count: num, model_snapshots: [map]}
@example_request "{\n  \"start\": \"1575402236000\"\n}"

@endpoint DELETE /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}
@desc Delete a model snapshot
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # Identifier for the model snapshot.}
@returns(200) {acknowledged: bool}

@endpoint GET /_ml/trained_models/{model_id}
@desc Get trained model configuration info
@required {model_id: any # The unique identifier of the trained model or a model alias.  You can get information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.}
@optional {allow_no_match: bool # Specifies what to do when the request:  - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches.  If true, it returns an empty array when there are no matches and the subset of results when there are partial matches., decompress_definition: bool # Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false)., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster., from: num # Skips the specified number of models., include: str # A comma delimited string of optional fields to include in the response body., size: num # Specifies the maximum number of models to obtain., tags: any # A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned.}
@returns(200) {count: num, trained_model_configs: [map]}

@endpoint PUT /_ml/trained_models/{model_id}
@desc Create a trained model
@required {model_id: str # The unique identifier of the trained model.}
@optional {defer_definition_decompression: bool # If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations., wait_for_completion: bool # Whether to wait for all child operations (e.g. model download) to complete., compressed_definition: str # The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified., definition: any # The inference definition for the model. If definition is specified, then compressed_definition cannot be specified., description: str # A human-readable description of the inference trained model., inference_config: any # The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required., input: any # The input field names for the model definition., metadata: map # An object map that contains metadata about the model., model_type: any=tree_ensemble # The model type., model_size_bytes: num # The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied., platform_architecture: str # The platform architecture (if applicable) of the trained model. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. 
For portable models (those that work independent of processor architecture or OS features), leave this field unset., tags: [str] # An array of tags to organize the model., prefix_strings: any # Optional prefix strings applied at inference}
@returns(200) {model_id: any, model_type: any, tags: [str], version: any, compressed_definition: str, created_by: str, create_time: any, default_field_map: map, description: str, estimated_heap_memory_usage_bytes: num, estimated_operations: num, fully_defined: bool, inference_config: any, input: any, license_level: str, metadata: any, model_size_bytes: any, model_package: any, location: any, platform_architecture: str, prefix_strings: any}

@endpoint DELETE /_ml/trained_models/{model_id}
@desc Delete an unreferenced trained model
@required {model_id: str # The unique identifier of the trained model.}
@optional {force: bool # Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint PUT /_ml/trained_models/{model_id}/model_aliases/{model_alias}
@desc Create or update a trained model alias
@required {model_id: str # The identifier for the trained model that the alias refers to., model_alias: str # The alias to create or update. This value cannot end in numbers.}
@optional {reassign: bool # Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error.}
@returns(200) {acknowledged: bool}

@endpoint DELETE /_ml/trained_models/{model_id}/model_aliases/{model_alias}
@desc Delete a trained model alias
@required {model_id: str # The trained model ID to which the model alias refers., model_alias: str # The model alias to delete.}
@returns(200) {acknowledged: bool}

@endpoint POST /_ml/anomaly_detectors/_estimate_model_memory
@desc Estimate job model memory usage
@optional {analysis_config: any # For a list of the properties that you can specify in the `analysis_config` component of the body of this API., max_bucket_cardinality: map # Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation., overall_cardinality: map # Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`.}
@returns(200) {model_memory_estimate: str}
@example_request "{\n  \"analysis_config\": {\n    \"bucket_span\": \"5m\",\n    \"detectors\": [\n      {\n        \"function\": \"sum\",\n        \"field_name\": \"bytes\",\n        \"by_field_name\": \"status\",\n        \"partition_field_name\": \"app\"\n      }\n    ],\n    \"influencers\": [\n      \"source_ip\",\n      \"dest_ip\"\n    ]\n  },\n  \"overall_cardinality\": {\n    \"status\": 10,\n    \"app\": 50\n  },\n  \"max_bucket_cardinality\": {\n    \"source_ip\": 300,\n    \"dest_ip\": 30\n  }\n}"

@endpoint POST /_ml/data_frame/_evaluate
@desc Evaluate data frame analytics
@required {evaluation: any # Defines the type of evaluation you want to perform., index: any # Defines the `index` in which the evaluation will be performed.}
@optional {query: any # A query clause that retrieves a subset of data from the source index.}
@returns(200) {classification: any, outlier_detection: any, regression: any}
@example_request "{\n  \"index\": \"animal_classification\",\n  \"evaluation\": {\n    \"classification\": {\n      \"actual_field\": \"animal_class\",\n      \"predicted_field\": \"ml.animal_class_prediction\",\n      \"metrics\": {\n        \"multiclass_confusion_matrix\": {}\n      }\n    }\n  }\n}"

@endpoint GET /_ml/data_frame/analytics/_explain
@desc Explain data frame analytics config
@optional {source: any # The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified., dest: any # The destination configuration, consisting of index and optionally results_field (ml by default)., analysis: any # The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression., description: str # A description of the job., model_memory_limit: str=1gb # The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting., max_num_threads: num=1 # The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself., analyzed_fields: any # Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis., allow_lazy_start: bool=false # Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.}
@returns(200) {field_selection: [map], memory_estimation: any}
@example_request "{\n  \"source\": {\n    \"index\": \"houses_sold_last_10_yrs\"\n  },\n  \"analysis\": {\n    \"regression\": {\n      \"dependent_variable\": \"price\"\n    }\n  }\n}"

@endpoint POST /_ml/data_frame/analytics/_explain
@desc Explain data frame analytics config
@optional {source: any # The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified., dest: any # The destination configuration, consisting of index and optionally results_field (ml by default)., analysis: any # The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression., description: str # A description of the job., model_memory_limit: str=1gb # The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting., max_num_threads: num=1 # The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself., analyzed_fields: any # Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis., allow_lazy_start: bool=false # Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.}
@returns(200) {field_selection: [map], memory_estimation: any}
@example_request "{\n  \"source\": {\n    \"index\": \"houses_sold_last_10_yrs\"\n  },\n  \"analysis\": {\n    \"regression\": {\n      \"dependent_variable\": \"price\"\n    }\n  }\n}"

@endpoint GET /_ml/data_frame/analytics/{id}/_explain
@desc Explain data frame analytics config
@required {id: str # Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {source: any # The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified., dest: any # The destination configuration, consisting of index and optionally results_field (ml by default)., analysis: any # The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression., description: str # A description of the job., model_memory_limit: str=1gb # The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting., max_num_threads: num=1 # The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself., analyzed_fields: any # Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis., allow_lazy_start: bool=false # Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.}
@returns(200) {field_selection: [map], memory_estimation: any}
@example_request "{\n  \"source\": {\n    \"index\": \"houses_sold_last_10_yrs\"\n  },\n  \"analysis\": {\n    \"regression\": {\n      \"dependent_variable\": \"price\"\n    }\n  }\n}"

@endpoint POST /_ml/data_frame/analytics/{id}/_explain
@desc Explain data frame analytics config
@required {id: str # Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {source: any # The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified., dest: any # The destination configuration, consisting of index and optionally results_field (ml by default)., analysis: any # The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression., description: str # A description of the job., model_memory_limit: str=1gb # The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting., max_num_threads: num=1 # The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself., analyzed_fields: any # Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis., allow_lazy_start: bool=false # Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.}
@returns(200) {field_selection: [map], memory_estimation: any}
@example_request "{\n  \"source\": {\n    \"index\": \"houses_sold_last_10_yrs\"\n  },\n  \"analysis\": {\n    \"regression\": {\n      \"dependent_variable\": \"price\"\n    }\n  }\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/_flush
@desc Force buffered data to be processed
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {advance_time: any # Specifies to advance to a particular time value. Results are generated and the model is updated for data from the specified time interval., calc_interim: bool # If true, calculates the interim results for the most recent bucket or all buckets within the latency period., end: any # When used in conjunction with `calc_interim` and `start`, specifies the range of buckets on which to calculate interim results., skip_time: any # Specifies to skip to a particular time value. Results are not generated and the model is not updated for data from the specified time interval., start: any # When used in conjunction with `calc_interim`, specifies the range of buckets on which to calculate interim results., advance_time: any # Refer to the description for the `advance_time` query parameter., calc_interim: bool # Refer to the description for the `calc_interim` query parameter., end: any # Refer to the description for the `end` query parameter., skip_time: any # Refer to the description for the `skip_time` query parameter., start: any # Refer to the description for the `start` query parameter.}
@returns(200) {flushed: bool, last_finalized_bucket_end: num}
@example_request "{\n  \"calc_interim\": true\n}"

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/buckets/{timestamp}
@desc Get anomaly detection job results for buckets
@required {job_id: str # Identifier for the anomaly detection job., timestamp: any # The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets.}
@optional {anomaly_score: num # Returns buckets with anomaly scores greater or equal than this value., desc: bool # If `true`, the buckets are sorted in descending order., end: any # Returns buckets with timestamps earlier than this time. `-1` means it is unset and results are not limited to specific timestamps., exclude_interim: bool # If `true`, the output excludes interim results., expand: bool # If true, the output includes anomaly records., from: num # Skips the specified number of buckets., size: num # Specifies the maximum number of buckets to obtain., sort: str # Specifies the sort field for the requested buckets., start: any # Returns buckets with timestamps after this time. `-1` means it is unset and results are not limited to specific timestamps., anomaly_score: num=0 # Refer to the description for the `anomaly_score` query parameter., desc: bool=false # Refer to the description for the `desc` query parameter., end: any=-1 # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., expand: bool=false # Refer to the description for the `expand` query parameter., page: any, sort: any=timestamp # Refer to the description for the `sort` query parameter., start: any=-1 # Refer to the description for the `start` query parameter.}
@returns(200) {buckets: [map], count: num}
@example_request "{\n  \"anomaly_score\": 80,\n  \"start\": \"1454530200001\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/buckets/{timestamp}
@desc Get anomaly detection job results for buckets
@required {job_id: str # Identifier for the anomaly detection job., timestamp: any # The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets.}
@optional {anomaly_score: num # Returns buckets with anomaly scores greater or equal than this value., desc: bool # If `true`, the buckets are sorted in descending order., end: any # Returns buckets with timestamps earlier than this time. `-1` means it is unset and results are not limited to specific timestamps., exclude_interim: bool # If `true`, the output excludes interim results., expand: bool # If true, the output includes anomaly records., from: num # Skips the specified number of buckets., size: num # Specifies the maximum number of buckets to obtain., sort: str # Specifies the sort field for the requested buckets., start: any # Returns buckets with timestamps after this time. `-1` means it is unset and results are not limited to specific timestamps., anomaly_score: num=0 # Refer to the description for the `anomaly_score` query parameter., desc: bool=false # Refer to the description for the `desc` query parameter., end: any=-1 # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., expand: bool=false # Refer to the description for the `expand` query parameter., page: any, sort: any=timestamp # Refer to the description for the `sort` query parameter., start: any=-1 # Refer to the description for the `start` query parameter.}
@returns(200) {buckets: [map], count: num}
@example_request "{\n  \"anomaly_score\": 80,\n  \"start\": \"1454530200001\"\n}"

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/buckets
@desc Get anomaly detection job results for buckets
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {anomaly_score: num # Returns buckets with anomaly scores greater or equal than this value., desc: bool # If `true`, the buckets are sorted in descending order., end: any # Returns buckets with timestamps earlier than this time. `-1` means it is unset and results are not limited to specific timestamps., exclude_interim: bool # If `true`, the output excludes interim results., expand: bool # If true, the output includes anomaly records., from: num # Skips the specified number of buckets., size: num # Specifies the maximum number of buckets to obtain., sort: str # Specifies the sort field for the requested buckets., start: any # Returns buckets with timestamps after this time. `-1` means it is unset and results are not limited to specific timestamps., anomaly_score: num=0 # Refer to the description for the `anomaly_score` query parameter., desc: bool=false # Refer to the description for the `desc` query parameter., end: any=-1 # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., expand: bool=false # Refer to the description for the `expand` query parameter., page: any, sort: any=timestamp # Refer to the description for the `sort` query parameter., start: any=-1 # Refer to the description for the `start` query parameter.}
@returns(200) {buckets: [map], count: num}
@example_request "{\n  \"anomaly_score\": 80,\n  \"start\": \"1454530200001\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/buckets
@desc Get anomaly detection job results for buckets
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {anomaly_score: num # Returns buckets with anomaly scores greater or equal than this value., desc: bool # If `true`, the buckets are sorted in descending order., end: any # Returns buckets with timestamps earlier than this time. `-1` means it is unset and results are not limited to specific timestamps., exclude_interim: bool # If `true`, the output excludes interim results., expand: bool # If true, the output includes anomaly records., from: num # Skips the specified number of buckets., size: num # Specifies the maximum number of buckets to obtain., sort: str # Specifies the sort field for the requested buckets., start: any # Returns buckets with timestamps after this time. `-1` means it is unset and results are not limited to specific timestamps., anomaly_score: num=0 # Refer to the description for the `anomaly_score` query parameter., desc: bool=false # Refer to the description for the `desc` query parameter., end: any=-1 # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., expand: bool=false # Refer to the description for the `expand` query parameter., page: any, sort: any=timestamp # Refer to the description for the `sort` query parameter., start: any=-1 # Refer to the description for the `start` query parameter.}
@returns(200) {buckets: [map], count: num}
@example_request "{\n  \"anomaly_score\": 80,\n  \"start\": \"1454530200001\"\n}"

@endpoint GET /_ml/calendars/{calendar_id}/events
@desc Get info about events in calendars
@required {calendar_id: str # A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.}
@optional {end: any # Specifies to get events with timestamps earlier than this time., from: num # Skips the specified number of events., job_id: str # Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`., size: num # Specifies the maximum number of events to obtain., start: any # Specifies to get events with timestamps after this time.}
@returns(200) {count: num, events: [map]}

@endpoint POST /_ml/calendars/{calendar_id}/events
@desc Add scheduled events to the calendar
@required {calendar_id: str # A string that uniquely identifies a calendar., events: [map{calendar_id: any, event_id: any, description!: str, end_time!: any, start_time!: any, skip_result: bool, skip_model_update: bool, force_time_shift: num}] # A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.}
@returns(200) {events: [map]}
@example_request "{\n  \"events\" : [\n    {\"description\": \"event 1\", \"start_time\": 1513641600000, \"end_time\": 1513728000000},\n    {\"description\": \"event 2\", \"start_time\": 1513814400000, \"end_time\": 1513900800000},\n    {\"description\": \"event 3\", \"start_time\": 1514160000000, \"end_time\": 1514246400000}\n  ]\n}"

@endpoint GET /_ml/calendars
@desc Get calendar configuration info
@optional {from: num # Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier., size: num # Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier., page: any # This object is supported only when you omit the calendar identifier.}
@returns(200) {calendars: [map], count: num}

@endpoint POST /_ml/calendars
@desc Get calendar configuration info
@optional {from: num # Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier., size: num # Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier., page: any # This object is supported only when you omit the calendar identifier.}
@returns(200) {calendars: [map], count: num}

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/categories/{category_id}
@desc Get anomaly detection job results for categories
@required {job_id: str # Identifier for the anomaly detection job., category_id: num # Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition.}
@optional {from: num # Skips the specified number of categories., partition_field_value: str # Only return categories for the specified partition., size: num # Specifies the maximum number of categories to obtain., page: any # Configures pagination. This parameter has the `from` and `size` properties.}
@returns(200) {categories: [map], count: num}
@example_request "{\n  \"page\":{\n    \"size\": 1\n  }\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/categories/{category_id}
@desc Get anomaly detection job results for categories
@required {job_id: str # Identifier for the anomaly detection job., category_id: num # Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition.}
@optional {from: num # Skips the specified number of categories., partition_field_value: str # Only return categories for the specified partition., size: num # Specifies the maximum number of categories to obtain., page: any # Configures pagination. This parameter has the `from` and `size` properties.}
@returns(200) {categories: [map], count: num}
@example_request "{\n  \"page\":{\n    \"size\": 1\n  }\n}"

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/categories
@desc Get anomaly detection job results for categories
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {from: num # Skips the specified number of categories., partition_field_value: str # Only return categories for the specified partition., size: num # Specifies the maximum number of categories to obtain., page: any # Configures pagination. This parameter has the `from` and `size` properties.}
@returns(200) {categories: [map], count: num}
@example_request "{\n  \"page\":{\n    \"size\": 1\n  }\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/categories
@desc Get anomaly detection job results for categories
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {from: num # Skips the specified number of categories., partition_field_value: str # Only return categories for the specified partition., size: num # Specifies the maximum number of categories to obtain., page: any # Configures pagination. This parameter has the `from` and `size` properties.}
@returns(200) {categories: [map], count: num}
@example_request "{\n  \"page\":{\n    \"size\": 1\n  }\n}"

@endpoint GET /_ml/data_frame/analytics
@desc Get data frame analytics job configuration info
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of data frame analytics jobs., size: num # Specifies the maximum number of data frame analytics jobs to obtain., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, data_frame_analytics: [map]}

@endpoint GET /_ml/data_frame/analytics/_stats
@desc Get data frame analytics job stats
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of data frame analytics jobs., size: num # Specifies the maximum number of data frame analytics jobs to obtain., verbose: bool # Defines whether the stats response should be verbose.}
@returns(200) {count: num, data_frame_analytics: [map]}

@endpoint GET /_ml/data_frame/analytics/{id}/_stats
@desc Get data frame analytics job stats
@required {id: str # Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of data frame analytics jobs., size: num # Specifies the maximum number of data frame analytics jobs to obtain., verbose: bool # Defines whether the stats response should be verbose.}
@returns(200) {count: num, data_frame_analytics: [map]}

@endpoint GET /_ml/datafeeds/{datafeed_id}/_stats
@desc Get datafeed stats
@required {datafeed_id: any # Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.}
@returns(200) {count: num, datafeeds: [map]}

@endpoint GET /_ml/datafeeds/_stats
@desc Get datafeed stats
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.}
@returns(200) {count: num, datafeeds: [map]}

@endpoint GET /_ml/datafeeds
@desc Get datafeeds configuration info
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, datafeeds: [map]}

@endpoint GET /_ml/filters
@desc Get filters
@optional {from: num # Skips the specified number of filters., size: num # Specifies the maximum number of filters to obtain.}
@returns(200) {count: num, filters: [map]}

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/influencers
@desc Get anomaly detection job results for influencers
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {desc: bool # If true, the results are sorted in descending order., end: any # Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps., exclude_interim: bool # If true, the output excludes interim results. By default, interim results are included., influencer_score: num # Returns influencers with anomaly scores greater than or equal to this value., from: num # Skips the specified number of influencers., size: num # Specifies the maximum number of influencers to obtain., sort: str # Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value., start: any # Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps., page: any # Configures pagination. This parameter has the `from` and `size` properties.}
@returns(200) {count: num, influencers: [map]}
@example_request "{\n  \"sort\": \"influencer_score\",\n  \"desc\": true\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/influencers
@desc Get anomaly detection job results for influencers
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {desc: bool # If true, the results are sorted in descending order., end: any # Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps., exclude_interim: bool # If true, the output excludes interim results. By default, interim results are included., influencer_score: num # Returns influencers with anomaly scores greater than or equal to this value., from: num # Skips the specified number of influencers., size: num # Specifies the maximum number of influencers to obtain., sort: str # Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value., start: any # Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps., page: any # Configures pagination. This parameter has the `from` and `size` properties.}
@returns(200) {count: num, influencers: [map]}
@example_request "{\n  \"sort\": \"influencer_score\",\n  \"desc\": true\n}"

@endpoint GET /_ml/anomaly_detectors/_stats
@desc Get anomaly detection job stats
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches.}
@returns(200) {count: num, jobs: [map]}

@endpoint GET /_ml/anomaly_detectors/{job_id}/_stats
@desc Get anomaly detection job stats
@required {job_id: str # Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches.}
@returns(200) {count: num, jobs: [map]}

@endpoint GET /_ml/anomaly_detectors
@desc Get anomaly detection jobs configuration info
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, jobs: [map]}

@endpoint GET /_ml/memory/_stats
@desc Get machine learning memory usage info
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint GET /_ml/memory/{node_id}/_stats
@desc Get machine learning memory usage info
@required {node_id: str # The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true`}
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint GET /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_upgrade/_stats
@desc Get anomaly detection job model snapshot upgrade usage info
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.}
@optional {allow_no_match: bool # Specifies what to do when the request:   -  Contains wildcard expressions and there are no jobs that match.  -  Contains the _all string or no identifiers and there are no matches.  -  Contains wildcard expressions and there are only partial matches.  The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches.}
@returns(200) {count: num, model_snapshot_upgrades: [map]}

@endpoint GET /_ml/anomaly_detectors/{job_id}/model_snapshots
@desc Get model snapshots info
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {desc: bool # If true, the results are sorted in descending order., end: any # Returns snapshots with timestamps earlier than this time., from: num # Skips the specified number of snapshots., size: num # Specifies the maximum number of snapshots to obtain., sort: str # Specifies the sort field for the requested snapshots. By default, the snapshots are sorted by their timestamp., start: any # Returns snapshots with timestamps after this time., desc: bool=false # Refer to the description for the `desc` query parameter., end: any # Refer to the description for the `end` query parameter., page: any, sort: any # Refer to the description for the `sort` query parameter., start: any # Refer to the description for the `start` query parameter.}
@returns(200) {count: num, model_snapshots: [map]}
@example_request "{\n  \"start\": \"1575402236000\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/model_snapshots
@desc Get model snapshots info
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {desc: bool # If true, the results are sorted in descending order., end: any # Returns snapshots with timestamps earlier than this time., from: num # Skips the specified number of snapshots., size: num # Specifies the maximum number of snapshots to obtain., sort: str # Specifies the sort field for the requested snapshots. By default, the snapshots are sorted by their timestamp., start: any # Returns snapshots with timestamps after this time., desc: bool=false # Refer to the description for the `desc` query parameter., end: any # Refer to the description for the `end` query parameter., page: any, sort: any # Refer to the description for the `sort` query parameter., start: any # Refer to the description for the `start` query parameter.}
@returns(200) {count: num, model_snapshots: [map]}
@example_request "{\n  \"start\": \"1575402236000\"\n}"

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/overall_buckets
@desc Get overall bucket results
@required {job_id: str # Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression.  You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the `<job_id>`.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If `true`, the request returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches., bucket_span: any # The span of the overall buckets. Must be greater or equal to the largest bucket span of the specified anomaly detection jobs, which is the default value.  By default, an overall bucket has a span equal to the largest bucket span of the specified anomaly detection jobs. To override that behavior, use the optional `bucket_span` parameter., end: any # Returns overall buckets with timestamps earlier than this time., exclude_interim: bool # If `true`, the output excludes interim results., overall_score: num # Returns overall buckets with overall scores greater than or equal to this value., start: any # Returns overall buckets with timestamps after this time., top_n: num # The number of top anomaly detection job bucket scores to be used in the `overall_score` calculation., allow_no_match: bool=true # Refer to the description for the `allow_no_match` query parameter., bucket_span: any # Refer to the description for the `bucket_span` query parameter., end: any # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., overall_score: num # Refer to the description for the `overall_score` query parameter., start: any # Refer to the description for the `start` query parameter., top_n: num=1 # Refer to the description for the `top_n` query parameter.}
@returns(200) {count: num, overall_buckets: [map]}
@example_request "{\n  \"overall_score\": 80,\n  \"start\": \"1403532000000\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/overall_buckets
@desc Get overall bucket results
@required {job_id: str # Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression.  You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the `<job_id>`.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If `true`, the request returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches., bucket_span: any # The span of the overall buckets. Must be greater or equal to the largest bucket span of the specified anomaly detection jobs, which is the default value.  By default, an overall bucket has a span equal to the largest bucket span of the specified anomaly detection jobs. To override that behavior, use the optional `bucket_span` parameter., end: any # Returns overall buckets with timestamps earlier than this time., exclude_interim: bool # If `true`, the output excludes interim results., overall_score: num # Returns overall buckets with overall scores greater than or equal to this value., start: any # Returns overall buckets with timestamps after this time., top_n: num # The number of top anomaly detection job bucket scores to be used in the `overall_score` calculation., allow_no_match: bool=true # Refer to the description for the `allow_no_match` query parameter., bucket_span: any # Refer to the description for the `bucket_span` query parameter., end: any # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., overall_score: num # Refer to the description for the `overall_score` query parameter., start: any # Refer to the description for the `start` query parameter., top_n: num=1 # Refer to the description for the `top_n` query parameter.}
@returns(200) {count: num, overall_buckets: [map]}
@example_request "{\n  \"overall_score\": 80,\n  \"start\": \"1403532000000\"\n}"

@endpoint GET /_ml/anomaly_detectors/{job_id}/results/records
@desc Get anomaly records for an anomaly detection job
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {desc: bool # If true, the results are sorted in descending order., end: any # Returns records with timestamps earlier than this time. The default value means results are not limited to specific timestamps., exclude_interim: bool # If `true`, the output excludes interim results., from: num # Skips the specified number of records., record_score: num # Returns records with anomaly scores greater or equal than this value., size: num # Specifies the maximum number of records to obtain., sort: str # Specifies the sort field for the requested records., start: any # Returns records with timestamps after this time. The default value means results are not limited to specific timestamps., desc: bool=false # Refer to the description for the `desc` query parameter., end: any=-1 # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., page: any, record_score: num=0 # Refer to the description for the `record_score` query parameter., sort: any=record_score # Refer to the description for the `sort` query parameter., start: any=-1 # Refer to the description for the `start` query parameter.}
@returns(200) {count: num, records: [map]}
@example_request "{\n  \"sort\": \"record_score\",\n  \"desc\": true,\n  \"start\": \"1454944100000\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/results/records
@desc Get anomaly records for an anomaly detection job
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {desc: bool # If true, the results are sorted in descending order., end: any # Returns records with timestamps earlier than this time. The default value means results are not limited to specific timestamps., exclude_interim: bool # If `true`, the output excludes interim results., from: num # Skips the specified number of records., record_score: num # Returns records with anomaly scores greater or equal than this value., size: num # Specifies the maximum number of records to obtain., sort: str # Specifies the sort field for the requested records., start: any # Returns records with timestamps after this time. The default value means results are not limited to specific timestamps., desc: bool=false # Refer to the description for the `desc` query parameter., end: any=-1 # Refer to the description for the `end` query parameter., exclude_interim: bool=false # Refer to the description for the `exclude_interim` query parameter., page: any, record_score: num=0 # Refer to the description for the `record_score` query parameter., sort: any=record_score # Refer to the description for the `sort` query parameter., start: any=-1 # Refer to the description for the `start` query parameter.}
@returns(200) {count: num, records: [map]}
@example_request "{\n  \"sort\": \"record_score\",\n  \"desc\": true,\n  \"start\": \"1454944100000\"\n}"

@endpoint GET /_ml/trained_models
@desc Get trained model configuration info
@optional {allow_no_match: bool # Specifies what to do when the request:  - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches.  If true, it returns an empty array when there are no matches and the subset of results when there are partial matches., decompress_definition: bool # Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false)., exclude_generated: bool # Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster., from: num # Skips the specified number of models., include: str # A comma delimited string of optional fields to include in the response body., size: num # Specifies the maximum number of models to obtain., tags: any # A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned.}
@returns(200) {count: num, trained_model_configs: [map]}

@endpoint GET /_ml/trained_models/{model_id}/_stats
@desc Get trained models usage info
@required {model_id: any # The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression.}
@optional {allow_no_match: bool # Specifies what to do when the request:  - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches.  If true, it returns an empty array when there are no matches and the subset of results when there are partial matches., from: num # Skips the specified number of models., size: num # Specifies the maximum number of models to obtain.}
@returns(200) {count: num, trained_model_stats: [map]}

@endpoint GET /_ml/trained_models/_stats
@desc Get trained models usage info
@optional {allow_no_match: bool # Specifies what to do when the request:  - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches.  If true, it returns an empty array when there are no matches and the subset of results when there are partial matches., from: num # Skips the specified number of models., size: num # Specifies the maximum number of models to obtain.}
@returns(200) {count: num, trained_model_stats: [map]}

@endpoint POST /_ml/trained_models/{model_id}/_infer
@desc Evaluate a trained model
@required {model_id: str # The unique identifier of the trained model., docs: [map] # An array of objects to pass to the model for inference. The objects should contain fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed.}
@optional {timeout: any # Controls the amount of time to wait for inference results., inference_config: any # The inference configuration updates to apply on the API call}
@returns(200) {inference_results: [map]}
@example_request "{\n  \"docs\":[{\"text\": \"The fool doth think he is wise, but the wise man knows himself to be a fool.\"}]\n}"

@endpoint GET /_ml/info
@desc Get machine learning information
@returns(200) {defaults: any, limits: any, upgrade_mode: bool, native_code: any}

@endpoint POST /_ml/anomaly_detectors/{job_id}/_open
@desc Open anomaly detection jobs
@required {job_id: str # Identifier for the anomaly detection job.}
@optional {timeout: any # Controls the time to wait until a job has opened., timeout: any=30m # Refer to the description for the `timeout` query parameter.}
@returns(200) {opened: bool, node: any}
@example_request "{\n  \"timeout\": \"35m\"\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/_data
@desc Send data to an anomaly detection job for analysis
@required {job_id: str # Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.}
@optional {reset_end: any # Specifies the end of the bucket resetting range., reset_start: any # Specifies the start of the bucket resetting range.}
@returns(200) {job_id: any, processed_record_count: num, processed_field_count: num, input_bytes: num, input_field_count: num, invalid_date_count: num, missing_field_count: num, out_of_order_timestamp_count: num, empty_bucket_count: num, sparse_bucket_count: num, bucket_count: num, earliest_record_timestamp: any, latest_record_timestamp: any, last_data_time: any, latest_empty_bucket_timestamp: any, latest_sparse_bucket_timestamp: any, input_record_count: num, log_time: any}

@endpoint GET /_ml/data_frame/analytics/_preview
@desc Preview features used by data frame analytics
@optional {config: any # A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API.}
@returns(200) {feature_values: [map]}
@example_request "{\n  \"config\": {\n    \"source\": {\n      \"index\": \"houses_sold_last_10_yrs\"\n    },\n    \"analysis\": {\n      \"regression\": {\n        \"dependent_variable\": \"price\"\n      }\n    }\n  }\n}"

@endpoint POST /_ml/data_frame/analytics/_preview
@desc Preview features used by data frame analytics
@optional {config: any # A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API.}
@returns(200) {feature_values: [map]}
@example_request "{\n  \"config\": {\n    \"source\": {\n      \"index\": \"houses_sold_last_10_yrs\"\n    },\n    \"analysis\": {\n      \"regression\": {\n        \"dependent_variable\": \"price\"\n      }\n    }\n  }\n}"

@endpoint GET /_ml/data_frame/analytics/{id}/_preview
@desc Preview features used by data frame analytics
@required {id: str # Identifier for the data frame analytics job.}
@optional {config: any # A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API.}
@returns(200) {feature_values: [map]}
@example_request "{\n  \"config\": {\n    \"source\": {\n      \"index\": \"houses_sold_last_10_yrs\"\n    },\n    \"analysis\": {\n      \"regression\": {\n        \"dependent_variable\": \"price\"\n      }\n    }\n  }\n}"

@endpoint POST /_ml/data_frame/analytics/{id}/_preview
@desc Preview features used by data frame analytics
@required {id: str # Identifier for the data frame analytics job.}
@optional {config: any # A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API.}
@returns(200) {feature_values: [map]}
@example_request "{\n  \"config\": {\n    \"source\": {\n      \"index\": \"houses_sold_last_10_yrs\"\n    },\n    \"analysis\": {\n      \"regression\": {\n        \"dependent_variable\": \"price\"\n      }\n    }\n  }\n}"

@endpoint GET /_ml/datafeeds/{datafeed_id}/_preview
@desc Preview a datafeed
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body.}
@optional {start: any # The start time from where the datafeed preview should begin, end: any # The end time when the datafeed preview should stop, datafeed_config: any # The datafeed definition to preview., job_config: any # The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.}
@returns(200)

@endpoint POST /_ml/datafeeds/{datafeed_id}/_preview
@desc Preview a datafeed
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body.}
@optional {start: any # The start time from where the datafeed preview should begin, end: any # The end time when the datafeed preview should stop, datafeed_config: any # The datafeed definition to preview., job_config: any # The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.}
@returns(200)

@endpoint GET /_ml/datafeeds/_preview
@desc Preview a datafeed
@optional {start: any # The start time from where the datafeed preview should begin, end: any # The end time when the datafeed preview should stop, datafeed_config: any # The datafeed definition to preview., job_config: any # The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.}
@returns(200)

@endpoint POST /_ml/datafeeds/_preview
@desc Preview a datafeed
@optional {start: any # The start time from where the datafeed preview should begin, end: any # The end time when the datafeed preview should stop, datafeed_config: any # The datafeed definition to preview., job_config: any # The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.}
@returns(200)

@endpoint PUT /_ml/trained_models/{model_id}/definition/{part}
@desc Create part of a trained model definition
@required {model_id: str # The unique identifier of the trained model., part: num # The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`., definition: str # The definition part for the model. Must be a base64 encoded string., total_definition_length: num # The total uncompressed definition length in bytes. Not base64 encoded., total_parts: num # The total number of parts that will be uploaded. Must be greater than 0.}
@returns(200) {acknowledged: bool}
@example_request "{\n    \"definition\": \"...\",\n    \"total_definition_length\": 265632637,\n    \"total_parts\": 64\n}"

@endpoint PUT /_ml/trained_models/{model_id}/vocabulary
@desc Create a trained model vocabulary
@required {model_id: str # The unique identifier of the trained model., vocabulary: [str] # The model vocabulary, which must not be empty.}
@optional {merges: [str] # The optional model merges if required by the tokenizer., scores: [num] # The optional vocabulary value scores if required by the tokenizer.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"vocabulary\": [\n    \"[PAD]\",\n    \"[unused0]\",\n  ]\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/_reset
@desc Reset an anomaly detection job
@required {job_id: str # The ID of the job to reset.}
@optional {wait_for_completion: bool # Should this request wait until the operation has completed before returning., delete_user_annotations: bool # Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset.}
@returns(200) {acknowledged: bool}

@endpoint POST /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_revert
@desc Revert to a snapshot
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # You can specify `empty` as the `<snapshot_id>`. Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started.}
@optional {delete_intervening_results: bool # If true, deletes the results in the time period between the latest results and the time of the reverted snapshot. It also resets the model to accept records for this time period. If you choose not to delete intervening results when reverting a snapshot, the job will not accept input data that is older than the current time. If you want to resend data, then delete the intervening results., delete_intervening_results: bool=false # Refer to the description for the `delete_intervening_results` query parameter.}
@returns(200) {model: any}
@example_request "{\n  \"delete_intervening_results\": true\n}"

@endpoint POST /_ml/set_upgrade_mode
@desc Set upgrade_mode for ML indices
@optional {enabled: bool # When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting., timeout: any # The time to wait for the request to be completed.}
@returns(200) {acknowledged: bool}

@endpoint POST /_ml/data_frame/analytics/{id}/_start
@desc Start a data frame analytics job
@required {id: str # Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {timeout: any # Controls the amount of time to wait until the data frame analytics job starts., id: any # If provided, must be the same identifier as in the path., timeout: any=20s # Controls the amount of time to wait until the data frame analytics job starts.}
@returns(200) {acknowledged: bool, node: any}

@endpoint POST /_ml/datafeeds/{datafeed_id}/_start
@desc Start datafeeds
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {end: any # The time that the datafeed should end, which can be specified by using one of the following formats:  * ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z` * ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00` * Milliseconds since the epoch, for example `1485061200000`  Date-time arguments using either of the ISO 8601 formats must have a time zone designator, where `Z` is accepted as an abbreviation for UTC time. When a URL is expected (for example, in browsers), the `+` used in time zone designators must be encoded as `%2B`. The end time value is exclusive. If you do not specify an end time, the datafeed runs continuously., start: any # The time that the datafeed should begin, which can be specified by using the same formats as the `end` parameter. This value is inclusive. If you do not specify a start time and the datafeed is associated with a new anomaly detection job, the analysis starts from the earliest time for which data is available. If you restart a stopped datafeed and specify a start value that is earlier than the timestamp of the latest processed record, the datafeed continues from 1 millisecond after the timestamp of the latest processed record., timeout: any # Specifies the amount of time to wait until a datafeed starts., end: any # Refer to the description for the `end` query parameter., start: any # Refer to the description for the `start` query parameter., timeout: any=20s # Refer to the description for the `timeout` query parameter.}
@returns(200) {node: any, started: bool}
@example_request "{\n  \"start\": \"2019-04-07T18:22:16Z\"\n}"

@endpoint POST /_ml/trained_models/{model_id}/deployment/_start
@desc Start a trained model deployment
@required {model_id: str # The unique identifier of the trained model. Currently, only PyTorch models are supported.}
@optional {cache_size: any # The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided., deployment_id: str # A unique identifier for the deployment of the model., number_of_allocations: num # The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set., priority: str # The deployment priority, queue_capacity: num # Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error., threads_per_allocation: num # Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads., timeout: any # Specifies the amount of time to wait for the model to deploy., wait_for: str # Specifies the allocation status to wait for before returning., adaptive_allocations: any # Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually.}
@returns(200) {assignment: any}

@endpoint POST /_ml/data_frame/analytics/{id}/_stop
@desc Stop data frame analytics jobs
@required {id: str # Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches., force: bool # If true, the data frame analytics job is stopped forcefully., timeout: any # Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds., id: any # If provided, must be the same identifier as in the path., allow_no_match: bool=true # Specifies what to do when the request:  1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches., force: bool=false # If true, the data frame analytics job is stopped forcefully., timeout: any=20s # Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds.}
@returns(200) {stopped: bool}

@endpoint POST /_ml/datafeeds/{datafeed_id}/_stop
@desc Stop datafeeds
@required {datafeed_id: str # Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can stop all datafeeds by using `_all` or by specifying `*` as the identifier.}
@optional {allow_no_match: bool # Specifies what to do when the request:  * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches.  If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches., force: bool # If `true`, the datafeed is stopped forcefully., timeout: any # Specifies the amount of time to wait until a datafeed stops., close_job: bool # If `true` the job associated with the datafeed is closed., allow_no_match: bool=true # Refer to the description for the `allow_no_match` query parameter., force: bool=false # Refer to the description for the `force` query parameter., timeout: any=20s # Refer to the description for the `timeout` query parameter., close_job: bool=false # Refer to the description for the `close_job` query parameter.}
@returns(200) {stopped: bool}
@example_request "{\n  \"timeout\": \"30s\"\n}"

@endpoint POST /_ml/trained_models/{model_id}/deployment/_stop
@desc Stop a trained model deployment
@required {model_id: str # The unique identifier of the trained model.}
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the  `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches., force: bool # Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment., id: any # If provided, must be the same identifier as in the path., allow_no_match: bool=true # Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the  `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches., force: bool=false # Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment.}
@returns(200) {stopped: bool}

@endpoint POST /_ml/data_frame/analytics/{id}/_update
@desc Update a data frame analytics job
@required {id: str # Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {description: str # A description of the job., model_memory_limit: str=1gb # The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting., max_num_threads: num=1 # The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself., allow_lazy_start: bool=false # Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.}
@returns(200) {authorization: any, allow_lazy_start: bool, analysis: any, analyzed_fields: any, create_time: num, description: str, dest: any, id: any, max_num_threads: num, model_memory_limit: str, source: any, version: any}
@example_request "{\n  \"model_memory_limit\": \"200mb\"\n}"

@endpoint POST /_ml/datafeeds/{datafeed_id}/_update
@desc Update a datafeed
@required {datafeed_id: str # A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values., ignore_throttled: bool # If `true`, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., aggregations: map # If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data., chunking_config: any # Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option., delayed_data_check_config: any # Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. 
This check runs only on real-time datafeeds., frequency: any # The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation., indices: [str] # An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role., indices_options: any # Specifies index expansion options that are used during search., job_id: any, max_empty_searches: num # If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set., query: any={"match_all": {"boost": 1}} # The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. 
Let both run in parallel and close one when you are satisfied with the results of the job., query_delay: any # The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node., runtime_mappings: any # Specifies runtime fields for the datafeed search., script_fields: map # Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields., scroll_size: num=1000 # The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`.}
@returns(200) {authorization: any, aggregations: map, chunking_config: any, delayed_data_check_config: any, datafeed_id: any, frequency: any, indices: [str], indices_options: any, job_id: any, max_empty_searches: num, query: any, query_delay: any, runtime_mappings: any, script_fields: map, scroll_size: num}
@example_request "{\n  \"query\": {\n    \"term\": {\n      \"geo.src\": \"US\"\n    }\n  }\n}"

@endpoint POST /_ml/filters/{filter_id}/_update
@desc Update a filter
@required {filter_id: str # A string that uniquely identifies a filter.}
@optional {add_items: [str] # The items to add to the filter., description: str # A description for the filter., remove_items: [str] # The items to remove from the filter.}
@returns(200) {description: str, filter_id: any, items: [str]}
@example_request "{\n  \"description\": \"Updated list of domains\",\n  \"add_items\": [\"*.myorg.com\"],\n  \"remove_items\": [\"wikipedia.org\"]\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/_update
@desc Update an anomaly detection job
@required {job_id: str # Identifier for the job.}
@optional {allow_lazy_open: bool=false # Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available., analysis_limits: any, background_persist_interval: any # Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect., custom_settings: map # Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results., categorization_filters: [str], description: str # A description of the job., model_plot_config: any, model_prune_window: any, daily_model_snapshot_retention_after_days: num=1 # Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. 
For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`., model_snapshot_retention_days: num=10 # Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job., renormalization_window_days: num # Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen., results_retention_days: num # Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained., groups: [str] # A list of job groups. A job can belong to no groups or many., detectors: [map{detector_index!: num, description: str, custom_rules: [map]}] # An array of detector update objects., per_partition_categorization: any # Settings related to how categorization interacts with partition fields.}
@returns(200) {allow_lazy_open: bool, analysis_config: any, analysis_limits: any, background_persist_interval: any, create_time: any, finished_time: any, custom_settings: map, daily_model_snapshot_retention_after_days: num, data_description: any, datafeed_config: any, description: str, groups: [str], job_id: any, job_type: str, job_version: any, model_plot_config: any, model_snapshot_id: any, model_snapshot_retention_days: num, renormalization_window_days: num, results_index_name: any, results_retention_days: num}
@example_request "{\n  \"description\":\"An updated job\",\n  \"detectors\": {\n    \"detector_index\": 0,\n    \"description\": \"An updated detector description\"\n  },\n  \"groups\": [\"kibana_sample_data\",\"kibana_sample_web_logs\"],\n  \"model_plot_config\": {\n    \"enabled\": true\n  },\n  \"renormalization_window_days\": 30,\n  \"background_persist_interval\": \"2h\",\n  \"model_snapshot_retention_days\": 7,\n  \"results_retention_days\": 60\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_update
@desc Update a snapshot
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # Identifier for the model snapshot.}
@optional {description: str # A description of the model snapshot., retain: bool=false # If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted.}
@returns(200) {acknowledged: bool, model: any}
@example_request "{\n  \"description\": \"Snapshot 1\",\n  \"retain\": true\n}"

@endpoint POST /_ml/trained_models/{model_id}/deployment/_update
@desc Update a trained model deployment
@required {model_id: str # The unique identifier of the trained model. Currently, only PyTorch models are supported.}
@optional {number_of_allocations: num=1 # The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set., adaptive_allocations: any # Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually.}
@returns(200) {assignment: any}
@example_request "{\n  \"number_of_allocations\": 4\n}"

@endpoint POST /_ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_upgrade
@desc Upgrade a snapshot
@required {job_id: str # Identifier for the anomaly detection job., snapshot_id: str # A numerical character string that uniquely identifies the model snapshot.}
@optional {wait_for_completion: bool # When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node., timeout: any # Controls the time to wait for the request to complete.}
@returns(200) {node: any, completed: bool}

@endgroup

@group _msearch
@endpoint GET /_msearch
@desc Run multiple searches
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., index: any # Comma-separated list of data streams, indices, and index aliases to use as default, max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute. 
Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., routing: any # Custom routing value used to route search operations to a specific shard., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{\"query\" : {\"match\" : { \"message\": \"this is a test\"}}}\n{\"index\": \"my-index-000002\"}\n{\"query\" : {\"match_all\" : {}}}"

@endpoint POST /_msearch
@desc Run multiple searches
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., index: any # Comma-separated list of data streams, indices, and index aliases to use as default, max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute. 
Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., routing: any # Custom routing value used to route search operations to a specific shard., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{\"query\" : {\"match\" : { \"message\": \"this is a test\"}}}\n{\"index\": \"my-index-000002\"}\n{\"query\" : {\"match_all\" : {}}}"

@endgroup

@group {index}
@endpoint GET /{index}/_msearch
@desc Run multiple searches
@required {index: any # Comma-separated list of data streams, indices, and index aliases to search.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., index: any # Comma-separated list of data streams, indices, and index aliases to use as default, max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute. 
Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., routing: any # Custom routing value used to route search operations to a specific shard., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{\"query\" : {\"match\" : { \"message\": \"this is a test\"}}}\n{\"index\": \"my-index-000002\"}\n{\"query\" : {\"match_all\" : {}}}"

@endpoint POST /{index}/_msearch
@desc Run multiple searches
@required {index: any # Comma-separated list of data streams, indices, and index aliases to search.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests., expand_wildcards: any # Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams., ignore_throttled: bool # If true, concrete, expanded or aliased indices are ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., index: any # Comma-separated list of data streams, indices, and index aliases to use as default, max_concurrent_searches: num # Maximum number of concurrent searches the multi search API can execute. 
Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., pre_filter_shard_size: num # Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint., rest_total_hits_as_int: bool # If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object., routing: any # Custom routing value used to route search operations to a specific shard., search_type: str # Indicates whether global term and document frequencies should be used when scoring returned documents., typed_keys: bool # Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{\"query\" : {\"match\" : { \"message\": \"this is a test\"}}}\n{\"index\": \"my-index-000002\"}\n{\"query\" : {\"match_all\" : {}}}"

@endgroup

@group _msearch
@endpoint GET /_msearch/template
@desc Run multiple templated searches
@optional {ccs_minimize_roundtrips: bool # If `true`, network round-trips are minimized for cross-cluster search requests., max_concurrent_searches: num # The maximum number of concurrent searches the API can run., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}"

@endpoint POST /_msearch/template
@desc Run multiple templated searches
@optional {ccs_minimize_roundtrips: bool # If `true`, network round-trips are minimized for cross-cluster search requests., max_concurrent_searches: num # The maximum number of concurrent searches the API can run., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}"

@endgroup

@group {index}
@endpoint GET /{index}/_msearch/template
@desc Run multiple templated searches
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`.}
@optional {ccs_minimize_roundtrips: bool # If `true`, network round-trips are minimized for cross-cluster search requests., max_concurrent_searches: num # The maximum number of concurrent searches the API can run., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}"

@endpoint POST /{index}/_msearch/template
@desc Run multiple templated searches
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`.}
@optional {ccs_minimize_roundtrips: bool # If `true`, network round-trips are minimized for cross-cluster search requests., max_concurrent_searches: num # The maximum number of concurrent searches the API can run., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types.}
@returns(200) {took: num, responses: [any]}
@example_request "{ }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}"

@endgroup

@group _mtermvectors
@endpoint GET /_mtermvectors
@desc Get multiple term vectors
@optional {ids: [str] # A comma-separated list of document IDs. You must define ids as a parameter or set "ids" or "docs" in the request body. In the request body, this is a simplified syntax to specify documents by their ID if they're in the same index., fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value used to route operations to a specific shard., term_statistics: bool # If true, the response includes term frequency and document frequency., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., docs: [map{_id: any, _index: any, doc: map, fields: any, field_statistics: bool, filter: any, offsets: bool, payloads: bool, positions: bool, routing: any, term_statistics: bool, version: any, version_type: any}] # An array of existing or artificial documents.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n      {\n        \"_id\": \"2\",\n        \"fields\": [\n            \"message\"\n        ],\n        \"term_statistics\": true\n      },\n      {\n        \"_id\": \"1\"\n      }\n  ]\n}"

@endpoint POST /_mtermvectors
@desc Get multiple term vectors
@optional {ids: [str] # A comma-separated list of document IDs. You must define ids as a parameter or set "ids" or "docs" in the request body. In the request body, it is a simplified syntax to specify documents by their ID if they're in the same index., fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value used to route operations to a specific shard., term_statistics: bool # If true, the response includes term frequency and document frequency., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., docs: [map{_id: any, _index: any, doc: map, fields: any, field_statistics: bool, filter: any, offsets: bool, payloads: bool, positions: bool, routing: any, term_statistics: bool, version: any, version_type: any}] # An array of existing or artificial documents.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n      {\n        \"_id\": \"2\",\n        \"fields\": [\n            \"message\"\n        ],\n        \"term_statistics\": true\n      },\n      {\n        \"_id\": \"1\"\n      }\n  ]\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_mtermvectors
@desc Get multiple term vectors
@required {index: str # The name of the index that contains the documents.}
@optional {ids: [str] # A comma-separated list of document IDs. You must define ids as a parameter or set "ids" or "docs" in the request body. In the request body, it is a simplified syntax to specify documents by their ID if they're in the same index., fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value used to route operations to a specific shard., term_statistics: bool # If true, the response includes term frequency and document frequency., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., docs: [map{_id: any, _index: any, doc: map, fields: any, field_statistics: bool, filter: any, offsets: bool, payloads: bool, positions: bool, routing: any, term_statistics: bool, version: any, version_type: any}] # An array of existing or artificial documents.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n      {\n        \"_id\": \"2\",\n        \"fields\": [\n            \"message\"\n        ],\n        \"term_statistics\": true\n      },\n      {\n        \"_id\": \"1\"\n      }\n  ]\n}"

@endpoint POST /{index}/_mtermvectors
@desc Get multiple term vectors
@required {index: str # The name of the index that contains the documents.}
@optional {ids: [str] # A comma-separated list of document IDs. You must define ids as a parameter or set "ids" or "docs" in the request body. In the request body, it is a simplified syntax to specify documents by their ID if they're in the same index., fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value used to route operations to a specific shard., term_statistics: bool # If true, the response includes term frequency and document frequency., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., docs: [map{_id: any, _index: any, doc: map, fields: any, field_statistics: bool, filter: any, offsets: bool, payloads: bool, positions: bool, routing: any, term_statistics: bool, version: any, version_type: any}] # An array of existing or artificial documents.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n      {\n        \"_id\": \"2\",\n        \"fields\": [\n            \"message\"\n        ],\n        \"term_statistics\": true\n      },\n      {\n        \"_id\": \"1\"\n      }\n  ]\n}"

@endgroup

@group _nodes
@endpoint DELETE /_nodes/{node_id}/_repositories_metering/{max_archive_version}
@desc Clear the archived repositories metering
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information., max_archive_version: num # Specifies the maximum `archive_version` to be cleared from the archive.}
@returns(200)

@endpoint GET /_nodes/{node_id}/_repositories_metering
@desc Get cluster repositories metering
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information.}
@returns(200)

@endpoint GET /_nodes/hot_threads
@desc Get the hot threads for nodes
@optional {ignore_idle_threads: bool # If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out., interval: any # The interval to do the second sampling of threads., snapshots: num # Number of samples of thread stacktrace., threads: num # Specifies the number of hot threads to provide information for., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., type: str # The type to sample., sort: str # The sort order for 'cpu' type}
@returns(200)

@endpoint GET /_nodes/{node_id}/hot_threads
@desc Get the hot threads for nodes
@required {node_id: any # List of node IDs or names used to limit returned information.}
@optional {ignore_idle_threads: bool # If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out., interval: any # The interval to do the second sampling of threads., snapshots: num # Number of samples of thread stacktrace., threads: num # Specifies the number of hot threads to provide information for., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., type: str # The type to sample., sort: str # The sort order for 'cpu' type}
@returns(200)

@endpoint GET /_nodes
@desc Get node information
@optional {flat_settings: bool # If true, returns settings in flat format., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_nodes/{node_id}
@desc Get node information
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information.}
@optional {flat_settings: bool # If true, returns settings in flat format., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_nodes/{metric}
@desc Get node information
@required {metric: any # Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest.}
@optional {flat_settings: bool # If true, returns settings in flat format., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_nodes/{node_id}/{metric}
@desc Get node information
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information., metric: any # Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest.}
@optional {flat_settings: bool # If true, returns settings in flat format., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint POST /_nodes/reload_secure_settings
@desc Reload the keystore on nodes in the cluster
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., secure_settings_password: any # The password for the Elasticsearch keystore.}
@returns(200)
@example_request "{\n  \"secure_settings_password\": \"keystore-password\"\n}"

@endpoint POST /_nodes/{node_id}/reload_secure_settings
@desc Reload the keystore on nodes in the cluster
@required {node_id: any # The names of particular nodes in the cluster to target.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., secure_settings_password: any # The password for the Elasticsearch keystore.}
@returns(200)
@example_request "{\n  \"secure_settings_password\": \"keystore-password\"\n}"

@endpoint GET /_nodes/stats
@desc Get node statistics
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., groups: bool # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., level: str # Indicates whether statistics are aggregated at the node, indices, or shards level., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., types: [str] # A comma-separated list of document types for the indexing index metric., include_unloaded_segments: bool # If `true`, the response includes information from segments that are not loaded into memory.}
@returns(200)

@endpoint GET /_nodes/{node_id}/stats
@desc Get node statistics
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., groups: bool # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., level: str # Indicates whether statistics are aggregated at the node, indices, or shards level., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., types: [str] # A comma-separated list of document types for the indexing index metric., include_unloaded_segments: bool # If `true`, the response includes information from segments that are not loaded into memory.}
@returns(200)

@endpoint GET /_nodes/stats/{metric}
@desc Get node statistics
@required {metric: any # Limits the information returned to the specific metrics.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., groups: bool # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., level: str # Indicates whether statistics are aggregated at the node, indices, or shards level., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., types: [str] # A comma-separated list of document types for the indexing index metric., include_unloaded_segments: bool # If `true`, the response includes information from segments that are not loaded into memory.}
@returns(200)

@endpoint GET /_nodes/{node_id}/stats/{metric}
@desc Get node statistics
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information., metric: any # Limits the information returned to the specific metrics.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., groups: bool # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., level: str # Indicates whether statistics are aggregated at the node, indices, or shards level., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., types: [str] # A comma-separated list of document types for the indexing index metric., include_unloaded_segments: bool # If `true`, the response includes information from segments that are not loaded into memory.}
@returns(200)

@endpoint GET /_nodes/stats/{metric}/{index_metric}
@desc Get node statistics
@required {metric: any # Limits the information returned to the specific metrics., index_metric: any # Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., groups: bool # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., level: str # Indicates whether statistics are aggregated at the node, indices, or shards level., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., types: [str] # A comma-separated list of document types for the indexing index metric., include_unloaded_segments: bool # If `true`, the response includes information from segments that are not loaded into memory.}
@returns(200)

@endpoint GET /_nodes/{node_id}/stats/{metric}/{index_metric}
@desc Get node statistics
@required {node_id: any # Comma-separated list of node IDs or names used to limit returned information., metric: any # Limits the information returned to the specific metrics., index_metric: any # Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified.}
@optional {completion_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics., fielddata_fields: any # Comma-separated list or wildcard expressions of fields to include in fielddata statistics., fields: any # Comma-separated list or wildcard expressions of fields to include in the statistics., groups: bool # Comma-separated list of search groups to include in the search statistics., include_segment_file_sizes: bool # If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)., level: str # Indicates whether statistics are aggregated at the node, indices, or shards level., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., types: [str] # A comma-separated list of document types for the indexing index metric., include_unloaded_segments: bool # If `true`, the response includes information from segments that are not loaded into memory.}
@returns(200)

@endpoint GET /_nodes/usage
@desc Get feature usage information
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_nodes/{node_id}/usage
@desc Get feature usage information
@required {node_id: any # A comma-separated list of node IDs or names to limit the returned information. Use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_nodes/usage/{metric}
@desc Get feature usage information
@required {metric: any # Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`, `aggregations`.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_nodes/{node_id}/usage/{metric}
@desc Get feature usage information
@required {node_id: any # A comma-separated list of node IDs or names to limit the returned information. Use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes., metric: any # Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`, `aggregations`.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endgroup

@group {index}
@endpoint POST /{index}/_pit
@desc Open a point in time
@required {index: any # A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices, keep_alive: any # Extend the length of time that the point in time persists.}
@optional {ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., preference: str # The node or shard the operation should be performed on. By default, it is random., routing: any # A custom value that is used to route operations to a specific shard., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., allow_partial_search_results: bool # Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request., max_concurrent_shard_requests: num # Maximum number of concurrent shard requests that each sub-search request executes per node., index_filter: any # Filter indices if the provided query rewrites to `match_none` on every shard.}
@returns(200) {_shards: any, id: any}

@endgroup

@group _scripts
@endpoint PUT /_scripts/{id}/{context}
@desc Create or update a script or search template
@required {id: str # The identifier for the stored script or search template. It must be unique within the cluster., context: str # The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context., script: any # The script or search template, its parameters, and its language.}
@optional {context: str # The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. If you specify both this and the `context` path parameter, the API uses the request path parameter., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"script\": {\n    \"lang\": \"mustache\",\n    \"source\": {\n      \"query\": {\n        \"match\": {\n          \"message\": \"{{query_string}}\"\n        }\n      },\n      \"from\": \"{{from}}\",\n      \"size\": \"{{size}}\"\n    }\n  }\n}"

@endpoint POST /_scripts/{id}/{context}
@desc Create or update a script or search template
@required {id: str # The identifier for the stored script or search template. It must be unique within the cluster., context: str # The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context., script: any # The script or search template, its parameters, and its language.}
@optional {context: str # The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. If you specify both this and the `context` path parameter, the API uses the request path parameter., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"script\": {\n    \"lang\": \"mustache\",\n    \"source\": {\n      \"query\": {\n        \"match\": {\n          \"message\": \"{{query_string}}\"\n        }\n      },\n      \"from\": \"{{from}}\",\n      \"size\": \"{{size}}\"\n    }\n  }\n}"

@endgroup

@group _query_rules
@endpoint GET /_query_rules/{ruleset_id}/_rule/{rule_id}
@desc Get a query rule
@required {ruleset_id: str # The unique identifier of the query ruleset containing the rule to retrieve, rule_id: str # The unique identifier of the query rule within the specified ruleset to retrieve}
@returns(200) {rule_id: any, type: any, criteria: any, actions: any, priority: num}

@endpoint PUT /_query_rules/{ruleset_id}/_rule/{rule_id}
@desc Create or update a query rule
@required {ruleset_id: str # The unique identifier of the query ruleset containing the rule to be created or updated., rule_id: str # The unique identifier of the query rule within the specified ruleset to be created or updated., type: any # The type of rule., criteria: any # The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied., actions: any # The actions to take when the rule is matched. The format of this action depends on the rule type.}
@optional {priority: num}
@returns(200) {result: any}
@example_request "{\n  \"type\": \"pinned\",\n  \"criteria\": [\n    {\n      \"type\": \"contains\",\n      \"metadata\": \"user_query\",\n      \"values\": [ \"pugs\", \"puggles\" ]\n    }\n  ],\n  \"actions\": {\n    \"ids\": [\n      \"id1\",\n      \"id2\"\n    ]\n  }\n}"

@endpoint DELETE /_query_rules/{ruleset_id}/_rule/{rule_id}
@desc Delete a query rule
@required {ruleset_id: str # The unique identifier of the query ruleset containing the rule to delete, rule_id: str # The unique identifier of the query rule within the specified ruleset to delete}
@returns(200) {acknowledged: bool}

@endpoint GET /_query_rules/{ruleset_id}
@desc Get a query ruleset
@required {ruleset_id: str # The unique identifier of the query ruleset}
@returns(200) {ruleset_id: any, rules: [map]}

@endpoint PUT /_query_rules/{ruleset_id}
@desc Create or update a query ruleset
@required {ruleset_id: str # The unique identifier of the query ruleset to be created or updated., rules: any}
@returns(200) {result: any}
@example_request "{\n    \"rules\": [\n        {\n            \"rule_id\": \"my-rule1\",\n            \"type\": \"pinned\",\n            \"criteria\": [\n                {\n                    \"type\": \"contains\",\n                    \"metadata\": \"user_query\",\n                    \"values\": [ \"pugs\", \"puggles\" ]\n                },\n                {\n                    \"type\": \"exact\",\n                    \"metadata\": \"user_country\",\n                    \"values\": [ \"us\" ]\n                }\n            ],\n            \"actions\": {\n                \"ids\": [\n                    \"id1\",\n                    \"id2\"\n                ]\n            }\n        },\n        {\n            \"rule_id\": \"my-rule2\",\n            \"type\": \"pinned\",\n            \"criteria\": [\n                {\n                    \"type\": \"fuzzy\",\n                    \"metadata\": \"user_query\",\n                    \"values\": [ \"rescue dogs\" ]\n                }\n            ],\n            \"actions\": {\n                \"docs\": [\n                    {\n                        \"_index\": \"index1\",\n                        \"_id\": \"id3\"\n                    },\n                    {\n                        \"_index\": \"index2\",\n                        \"_id\": \"id4\"\n                    }\n                ]\n            }\n        }\n    ]\n}"

@endpoint DELETE /_query_rules/{ruleset_id}
@desc Delete a query ruleset
@required {ruleset_id: str # The unique identifier of the query ruleset to delete}
@returns(200) {acknowledged: bool}

@endpoint GET /_query_rules
@desc Get all query rulesets
@optional {from: num # The offset from the first result to fetch., size: num # The maximum number of results to retrieve.}
@returns(200) {count: num, results: [map]}

@endpoint POST /_query_rules/{ruleset_id}/_test
@desc Test a query ruleset
@required {ruleset_id: str # The unique identifier of the query ruleset to be tested, match_criteria: map # The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule.}
@returns(200) {total_matched_rules: num, matched_rules: [map]}
@example_request "{\n  \"match_criteria\": {\n    \"query_string\": \"puggles\"\n  }\n}"

@endgroup

@group _rank_eval
@endpoint GET /_rank_eval
@desc Evaluate ranked search results
@required {requests: [map{id!: any, request: any, ratings!: [map], template_id: any, params: map}] # A set of typical search requests, together with their provided ratings.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., search_type: str # Search operation type, metric: any # Definition of the evaluation metric to calculate.}
@returns(200) {metric_score: num, details: map, failures: map}
@example_request "{\n  \"requests\": [\n    {\n      \"id\": \"JFK query\",\n      \"request\": { \"query\": { \"match_all\": {} } },\n      \"ratings\": []\n    } ],\n  \"metric\": {\n    \"precision\": {\n      \"k\": 20,\n      \"relevant_rating_threshold\": 1,\n      \"ignore_unlabeled\": false\n    }\n  }\n}"

@endpoint POST /_rank_eval
@desc Evaluate ranked search results
@required {requests: [map{id!: any, request: any, ratings!: [map], template_id: any, params: map}] # A set of typical search requests, together with their provided ratings.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., search_type: str # Search operation type, metric: any # Definition of the evaluation metric to calculate.}
@returns(200) {metric_score: num, details: map, failures: map}
@example_request "{\n  \"requests\": [\n    {\n      \"id\": \"JFK query\",\n      \"request\": { \"query\": { \"match_all\": {} } },\n      \"ratings\": []\n    } ],\n  \"metric\": {\n    \"precision\": {\n      \"k\": 20,\n      \"relevant_rating_threshold\": 1,\n      \"ignore_unlabeled\": false\n    }\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_rank_eval
@desc Evaluate ranked search results
@required {index: any # A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`., requests: [map{id!: any, request: any, ratings!: [map], template_id: any, params: map}] # A set of typical search requests, together with their provided ratings.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., search_type: str # Search operation type, metric: any # Definition of the evaluation metric to calculate.}
@returns(200) {metric_score: num, details: map, failures: map}
@example_request "{\n  \"requests\": [\n    {\n      \"id\": \"JFK query\",\n      \"request\": { \"query\": { \"match_all\": {} } },\n      \"ratings\": []\n    } ],\n  \"metric\": {\n    \"precision\": {\n      \"k\": 20,\n      \"relevant_rating_threshold\": 1,\n      \"ignore_unlabeled\": false\n    }\n  }\n}"

@endpoint POST /{index}/_rank_eval
@desc Evaluate ranked search results
@required {index: any # A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`., requests: [map{id!: any, request: any, ratings!: [map], template_id: any, params: map}] # A set of typical search requests, together with their provided ratings.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., search_type: str # Search operation type, metric: any # Definition of the evaluation metric to calculate.}
@returns(200) {metric_score: num, details: map, failures: map}
@example_request "{\n  \"requests\": [\n    {\n      \"id\": \"JFK query\",\n      \"request\": { \"query\": { \"match_all\": {} } },\n      \"ratings\": []\n    } ],\n  \"metric\": {\n    \"precision\": {\n      \"k\": 20,\n      \"relevant_rating_threshold\": 1,\n      \"ignore_unlabeled\": false\n    }\n  }\n}"

@endgroup

@group _reindex
@endpoint POST /_reindex
@desc Reindex documents
@required {dest: any # The destination you are copying to., source: any # The source you are copying from.}
@optional {refresh: bool # If `true`, the request refreshes affected shards to make this operation visible to search., requests_per_second: num # The throttle for this request in sub-requests per second. By default, there is no throttle., scroll: any # The period of time that a consistent view of the index should be maintained for scrolled search., slices: any # The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks.  Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.  NOTE: Reindexing from remote clusters does not support manual or automatic slicing.  If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards., max_docs: num # The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation.  If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query., timeout: any # The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. 
Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active., wait_for_completion: bool # If `true`, the request blocks until the operation is complete., require_alias: bool # If `true`, the destination must be an index alias., conflicts: any=abort # Indicates whether to continue reindexing even when there are conflicts., script: any # The script to run to update the document source or metadata when reindexing.}
@returns(200) {batches: num, created: num, deleted: num, failures: [map], noops: num, retries: any, requests_per_second: num, slice_id: num, slices: [map], task: any, throttled_millis: any, throttled_until_millis: any, timed_out: bool, took: any, total: num, updated: num, version_conflicts: num}
@example_request "{\n  \"source\": {\n    \"index\": [\"my-index-000001\", \"my-index-000002\"]\n  },\n  \"dest\": {\n    \"index\": \"my-new-index-000002\"\n  }\n}"

@endpoint POST /_reindex/{task_id}/_rethrottle
@desc Throttle a reindex operation
@required {task_id: str # The task identifier, which can be found by using the tasks API., requests_per_second: num # The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level.}
@optional {group_by: str}
@returns(200) {node_failures: [map], task_failures: [map], nodes: map, tasks: any}

@endgroup

@group _render
@endpoint GET /_render/template
@desc Render a search template
@optional {id: any # The ID of the search template to render. If no `source` is specified, this or the `<template-id>` request path parameter is required. If you specify both this parameter and the `<template-id>` parameter, the API uses only `<template-id>`., file: str, params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., source: any # An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `<template-id>` is specified, this parameter is required.}
@returns(200) {template_output: map}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 20,\n    \"size\": 10\n  }\n}"

@endpoint POST /_render/template
@desc Render a search template
@optional {id: any # The ID of the search template to render. If no `source` is specified, this or the `<template-id>` request path parameter is required. If you specify both this parameter and the `<template-id>` parameter, the API uses only `<template-id>`., file: str, params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., source: any # An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `<template-id>` is specified, this parameter is required.}
@returns(200) {template_output: map}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 20,\n    \"size\": 10\n  }\n}"

@endpoint GET /_render/template/{id}
@desc Render a search template
@required {id: str # The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.}
@optional {id: any # The ID of the search template to render. If no `source` is specified, this or the `<template-id>` request path parameter is required. If you specify both this parameter and the `<template-id>` parameter, the API uses only `<template-id>`., file: str, params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., source: any # An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `<template-id>` is specified, this parameter is required.}
@returns(200) {template_output: map}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 20,\n    \"size\": 10\n  }\n}"

@endpoint POST /_render/template/{id}
@desc Render a search template
@required {id: str # The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.}
@optional {id: any # The ID of the search template to render. If no `source` is specified, this or the `<template-id>` request path parameter is required. If you specify both this parameter and the `<template-id>` parameter, the API uses only `<template-id>`., file: str, params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., source: any # An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `<template-id>` is specified, this parameter is required.}
@returns(200) {template_output: map}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 20,\n    \"size\": 10\n  }\n}"

@endgroup

@group _rollup
@endpoint GET /_rollup/job/{id}
@desc Get rollup job information
@required {id: str # Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs.}
@returns(200) {jobs: [map]}

@endpoint PUT /_rollup/job/{id}
@desc Create a rollup job
@required {id: str # Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations., cron: str # A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule., groups: any # Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed., index_pattern: str # The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern., page_size: num # The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. 
This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer., rollup_index: any # The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs.}
@optional {metrics: [map{field!: any, metrics!: [str]}] # Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected., timeout: any=20s # Time to wait for the request to complete., headers: any}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index_pattern\": \"sensor-*\",\n  \"rollup_index\": \"sensor_rollup\",\n  \"cron\": \"*/30 * * * * ?\",\n  \"page_size\": 1000,\n  \"groups\": {\n    \"date_histogram\": {\n      \"field\": \"timestamp\",\n      \"fixed_interval\": \"1h\",\n      \"delay\": \"7d\"\n    },\n    \"terms\": {\n      \"fields\": [ \"node\" ]\n    }\n  },\n  \"metrics\": [\n      {\n      \"field\": \"temperature\",\n      \"metrics\": [ \"min\", \"max\", \"sum\" ]\n    },\n    {\n      \"field\": \"voltage\",\n      \"metrics\": [ \"avg\" ]\n    }\n  ]\n}"

@endpoint DELETE /_rollup/job/{id}
@desc Delete a rollup job
@required {id: str # Identifier for the job.}
@returns(200) {acknowledged: bool, task_failures: [map]}

@endpoint GET /_rollup/job
@desc Get rollup job information
@returns(200) {jobs: [map]}

@endpoint GET /_rollup/data/{id}
@desc Get the rollup job capabilities
@required {id: str # Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs.}
@returns(200)

@endpoint GET /_rollup/data
@desc Get the rollup job capabilities
@returns(200)

@endgroup

@group {index}
@endpoint GET /{index}/_rollup/data
@desc Get the rollup index capabilities
@required {index: any # Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported.}
@returns(200)

@endpoint GET /{index}/_rollup_search
@desc Search rolled-up data
@required {index: any # A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules:  * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams.}
@optional {rest_total_hits_as_int: bool # Indicates whether hits.total should be rendered as an integer or an object in the rest search response, typed_keys: bool # Specify whether aggregation and suggester names should be prefixed by their respective types in the response, aggregations: map # Specifies aggregations., query: any # Specifies a DSL query that is subject to some limitations., size: num # Must be zero if set, as rollups work on pre-aggregated data.}
@returns(200) {took: num, timed_out: bool, terminated_early: bool, _shards: any, hits: any, aggregations: map}
@example_request "{\n  \"size\": 0,\n  \"aggregations\": {\n    \"max_temperature\": {\n      \"max\": {\n        \"field\": \"temperature\"\n      }\n    }\n  }\n}"

@endpoint POST /{index}/_rollup_search
@desc Search rolled-up data
@required {index: any # A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules:  * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams.}
@optional {rest_total_hits_as_int: bool # Indicates whether hits.total should be rendered as an integer or an object in the rest search response, typed_keys: bool # Specify whether aggregation and suggester names should be prefixed by their respective types in the response, aggregations: map # Specifies aggregations., query: any # Specifies a DSL query that is subject to some limitations., size: num # Must be zero if set, as rollups work on pre-aggregated data.}
@returns(200) {took: num, timed_out: bool, terminated_early: bool, _shards: any, hits: any, aggregations: map}
@example_request "{\n  \"size\": 0,\n  \"aggregations\": {\n    \"max_temperature\": {\n      \"max\": {\n        \"field\": \"temperature\"\n      }\n    }\n  }\n}"

@endgroup

@group _rollup
@endpoint POST /_rollup/job/{id}/_start
@desc Start rollup jobs
@required {id: str # Identifier for the rollup job.}
@returns(200) {started: bool}

@endpoint POST /_rollup/job/{id}/_stop
@desc Stop rollup jobs
@required {id: str # Identifier for the rollup job.}
@optional {timeout: any # If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change., wait_for_completion: bool # If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background.}
@returns(200) {stopped: bool}

@endgroup

@group _scripts
@endpoint GET /_scripts/painless/_execute
@desc Run a script
@optional {context: any=painless_test # The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed., context_setup: any # Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`., script: any # The Painless script to run.}
@returns(200) {result: map}
@example_request "{\n  \"script\": {\n    \"source\": \"params.count / params.total\",\n    \"params\": {\n      \"count\": 100.0,\n      \"total\": 1000.0\n    }\n  }\n}"

@endpoint POST /_scripts/painless/_execute
@desc Run a script
@optional {context: any=painless_test # The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed., context_setup: any # Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`., script: any # The Painless script to run.}
@returns(200) {result: map}
@example_request "{\n  \"script\": {\n    \"source\": \"params.count / params.total\",\n    \"params\": {\n      \"count\": 100.0,\n      \"total\": 1000.0\n    }\n  }\n}"

@endgroup

@group _search
@endpoint GET /_search
@desc Run a search
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results.  To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., batched_reduce_size: num # The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request., ccs_minimize_roundtrips: bool # If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests., default_operator: str # The default operator for the query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. 
This parameter can be used only when the `q` query string parameter is specified., docvalue_fields: any # A comma-separated list of fields to return as the docvalue representation of a field for each hit., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`., explain: bool # If `true`, the request returns detailed information about score computation as part of a hit., ignore_throttled: bool # If `true`, concrete, expanded or aliased indices will be ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # If `true`, the response includes the score contribution from any named queries.  This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified., max_concurrent_shard_requests: num # The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests., preference: str # The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
Valid values are:  * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order., pre_filter_shard_size: num # A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met:  * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field., request_cache: bool # If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings., routing: any # A custom value that is used to route operations to a specific shard., scroll: any # The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). 
You can change this limit by using the `search.max_keep_alive` cluster-level setting., search_type: str # Indicates how distributed term frequencies are calculated for relevance scoring., stats: [str] # Specific `tag` of the request for logging and statistical purposes., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., suggest_field: str # The field to use for suggestions., suggest_mode: str # The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_size: num # The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_text: str # The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early., timeout: any # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
It defaults to no timeout., track_total_hits: any # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., track_scores: bool # If `true`, the request calculates and returns document scores, even if the scores are not used for sorting., typed_keys: bool # If `true`, aggregation and suggester names are prefixed by their respective types in the response., rest_total_hits_as_int: bool # Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response., version: bool # If `true`, the request returns the document version as part of a hit., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. Valid values are:  * `true` to return the entire document source. * `false` to not return the document source. * `<list>` to return the source fields that are specified as a comma-separated list that supports wildcard (`*`) patterns., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_exclude_vectors: bool # Whether vectors should be excluded from _source, _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. 
If the `_source` parameter is `false`, this parameter is ignored., seq_no_primary_term: bool # If `true`, the request returns the sequence number and primary term of the last modification of each hit., q: str # A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.  IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned., size: num # The number of hits to return. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., from: num # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # A comma-separated list of `:` pairs., aggregations: map # Defines the aggregations that are run as part of the search request., collapse: any # Collapses search results the values of the specified field., explain: bool=false # If `true`, the request returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., highlight: any # Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results., track_total_hits: any=10000 # Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. 
If `false`, the  response does not include the total number of hits matching the query., indices_boost: [map] # Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response., knn: any # The approximate kNN search to run., rank: any # The Reciprocal Rank Fusion (RRF) to use., min_score: num # The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations., post_filter: any # Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results., profile: bool=false # Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution., query: any # The search definition using the Query DSL., rescore: any # Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases., retriever: any # A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`., script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any # Used to retrieve the next page of hits using a set of sort values from the previous page., size: num=10 # The number of hits to return, which must not be negative. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property., slice: any # Split a scrolled search into multiple slices that can be consumed independently., sort: any # A comma-separated list of : pairs., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`., fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response., suggest: any # Defines a suggester that provides similar looking terms based on a provided text., terminate_after: num=0 # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers.  If set to `0` (default), the query does not terminate early., timeout: str # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
Defaults to no timeout., track_scores: bool=false # If `true`, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If `true`, the request returns the document version as part of a hit., seq_no_primary_term: bool # If `true`, the request returns sequence number and primary term of the last modification of each hit., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., pit: any # Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path., runtime_mappings: any # One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"query\": {\n    \"term\": {\n      \"user.id\": \"kimchy\"\n    }\n  }\n}"

@endpoint POST /_search
@desc Run a search
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results.  To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., batched_reduce_size: num # The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request., ccs_minimize_roundtrips: bool # If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests., default_operator: str # The default operator for the query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. 
This parameter can be used only when the `q` query string parameter is specified., docvalue_fields: any # A comma-separated list of fields to return as the docvalue representation of a field for each hit., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`., explain: bool # If `true`, the request returns detailed information about score computation as part of a hit., ignore_throttled: bool # If `true`, concrete, expanded or aliased indices will be ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # If `true`, the response includes the score contribution from any named queries.  This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified., max_concurrent_shard_requests: num # The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests., preference: str # The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
Valid values are:  * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order., pre_filter_shard_size: num # A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met:  * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field., request_cache: bool # If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings., routing: any # A custom value that is used to route operations to a specific shard., scroll: any # The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). 
You can change this limit by using the `search.max_keep_alive` cluster-level setting., search_type: str # Indicates how distributed term frequencies are calculated for relevance scoring., stats: [str] # Specific `tag` of the request for logging and statistical purposes., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., suggest_field: str # The field to use for suggestions., suggest_mode: str # The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_size: num # The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_text: str # The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early., timeout: any # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
It defaults to no timeout., track_total_hits: any # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., track_scores: bool # If `true`, the request calculates and returns document scores, even if the scores are not used for sorting., typed_keys: bool # If `true`, aggregation and suggester names are prefixed by their respective types in the response., rest_total_hits_as_int: bool # Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response., version: bool # If `true`, the request returns the document version as part of a hit., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. Valid values are:  * `true` to return the entire document source. * `false` to not return the document source. * `<string>` to return the source fields that are specified as a comma-separated list that supports wildcard (`*`) patterns., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_exclude_vectors: bool # Whether vectors should be excluded from _source, _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. 
If the `_source` parameter is `false`, this parameter is ignored., seq_no_primary_term: bool # If `true`, the request returns the sequence number and primary term of the last modification of each hit., q: str # A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.  IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned., size: num # The number of hits to return. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., from: num # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # A comma-separated list of `<field>:<direction>` pairs., aggregations: map # Defines the aggregations that are run as part of the search request., collapse: any # Collapses search results by the values of the specified field., explain: bool=false # If `true`, the request returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., highlight: any # Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results., track_total_hits: any=10000 # Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. 
If `false`, the  response does not include the total number of hits matching the query., indices_boost: [map] # Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response., knn: any # The approximate kNN search to run., rank: any # The Reciprocal Rank Fusion (RRF) to use., min_score: num # The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations., post_filter: any # Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results., profile: bool=false # Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution., query: any # The search definition using the Query DSL., rescore: any # Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases., retriever: any # A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`., script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any # Used to retrieve the next page of hits using a set of sort values from the previous page., size: num=10 # The number of hits to return, which must not be negative. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property., slice: any # Split a scrolled search into multiple slices that can be consumed independently., sort: any # A comma-separated list of `<field>:<direction>` pairs., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`., fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response., suggest: any # Defines a suggester that provides similar looking terms based on a provided text., terminate_after: num=0 # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers.  If set to `0` (default), the query does not terminate early., timeout: str # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
Defaults to no timeout., track_scores: bool=false # If `true`, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If `true`, the request returns the document version as part of a hit., seq_no_primary_term: bool # If `true`, the request returns sequence number and primary term of the last modification of each hit., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., pit: any # Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path., runtime_mappings: any # One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"query\": {\n    \"term\": {\n      \"user.id\": \"kimchy\"\n    }\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_search
@desc Run a search
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results.  To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., batched_reduce_size: num # The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request., ccs_minimize_roundtrips: bool # If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests., default_operator: str # The default operator for the query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. 
This parameter can be used only when the `q` query string parameter is specified., docvalue_fields: any # A comma-separated list of fields to return as the docvalue representation of a field for each hit., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`., explain: bool # If `true`, the request returns detailed information about score computation as part of a hit., ignore_throttled: bool # If `true`, concrete, expanded or aliased indices will be ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # If `true`, the response includes the score contribution from any named queries.  This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified., max_concurrent_shard_requests: num # The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests., preference: str # The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
Valid values are:  * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order., pre_filter_shard_size: num # A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met:  * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field., request_cache: bool # If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings., routing: any # A custom value that is used to route operations to a specific shard., scroll: any # The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). 
You can change this limit by using the `search.max_keep_alive` cluster-level setting., search_type: str # Indicates how distributed term frequencies are calculated for relevance scoring., stats: [str] # Specific `tag` of the request for logging and statistical purposes., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., suggest_field: str # The field to use for suggestions., suggest_mode: str # The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_size: num # The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_text: str # The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early., timeout: any # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
It defaults to no timeout., track_total_hits: any # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., track_scores: bool # If `true`, the request calculates and returns document scores, even if the scores are not used for sorting., typed_keys: bool # If `true`, aggregation and suggester names are be prefixed by their respective types in the response., rest_total_hits_as_int: bool # Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response., version: bool # If `true`, the request returns the document version as part of a hit., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. Valid values are:  * `true` to return the entire document source. * `false` to not return the document source. * `` to return the source fields that are specified as a comma-separated list that supports wildcard (`*`) patterns., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_exclude_vectors: bool # Whether vectors should be excluded from _source, _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. 
If the `_source` parameter is `false`, this parameter is ignored., seq_no_primary_term: bool # If `true`, the request returns the sequence number and primary term of the last modification of each hit., q: str # A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.  IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned., size: num # The number of hits to return. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., from: num # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # A comma-separated list of `:` pairs., aggregations: map # Defines the aggregations that are run as part of the search request., collapse: any # Collapses search results the values of the specified field., explain: bool=false # If `true`, the request returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., highlight: any # Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results., track_total_hits: any=10000 # Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. 
If `false`, the  response does not include the total number of hits matching the query., indices_boost: [map] # Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response., knn: any # The approximate kNN search to run., rank: any # The Reciprocal Rank Fusion (RRF) to use., min_score: num # The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations., post_filter: any # Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results., profile: bool=false # Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution., query: any # The search definition using the Query DSL., rescore: any # Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases., retriever: any # A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`., script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any # Used to retrieve the next page of hits using a set of sort values from the previous page., size: num=10 # The number of hits to return, which must not be negative. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property., slice: any # Split a scrolled search into multiple slices that can be consumed independently., sort: any # A comma-separated list of : pairs., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`., fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response., suggest: any # Defines a suggester that provides similar looking terms based on a provided text., terminate_after: num=0 # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers.  If set to `0` (default), the query does not terminate early., timeout: str # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
Defaults to no timeout., track_scores: bool=false # If `true`, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If `true`, the request returns the document version as part of a hit., seq_no_primary_term: bool # If `true`, the request returns sequence number and primary term of the last modification of each hit., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., pit: any # Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path., runtime_mappings: any # One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"query\": {\n    \"term\": {\n      \"user.id\": \"kimchy\"\n    }\n  }\n}"

@endpoint POST /{index}/_search
@desc Run a search
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., allow_partial_search_results: bool # If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results.  To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., batched_reduce_size: num # The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request., ccs_minimize_roundtrips: bool # If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests., default_operator: str # The default operator for the query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as a default when no field prefix is given in the query string. 
This parameter can be used only when the `q` query string parameter is specified., docvalue_fields: any # A comma-separated list of fields to return as the docvalue representation of a field for each hit., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`., explain: bool # If `true`, the request returns detailed information about score computation as part of a hit., ignore_throttled: bool # If `true`, concrete, expanded or aliased indices will be ignored when frozen., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., include_named_queries_score: bool # If `true`, the response includes the score contribution from any named queries.  This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified., max_concurrent_shard_requests: num # The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests., preference: str # The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
Valid values are:  * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order., pre_filter_shard_size: num # A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met:  * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field., request_cache: bool # If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings., routing: any # A custom value that is used to route operations to a specific shard., scroll: any # The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). 
You can change this limit by using the `search.max_keep_alive` cluster-level setting., search_type: str # Indicates how distributed term frequencies are calculated for relevance scoring., stats: [str] # Specific `tag` of the request for logging and statistical purposes., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., suggest_field: str # The field to use for suggestions., suggest_mode: str # The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_size: num # The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., suggest_text: str # The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early., timeout: any # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
It defaults to no timeout., track_total_hits: any # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., track_scores: bool # If `true`, the request calculates and returns document scores, even if the scores are not used for sorting., typed_keys: bool # If `true`, aggregation and suggester names are be prefixed by their respective types in the response., rest_total_hits_as_int: bool # Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response., version: bool # If `true`, the request returns the document version as part of a hit., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. Valid values are:  * `true` to return the entire document source. * `false` to not return the document source. * `` to return the source fields that are specified as a comma-separated list that supports wildcard (`*`) patterns., _source_excludes: any # A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored., _source_exclude_vectors: bool # Whether vectors should be excluded from _source, _source_includes: any # A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. 
If the `_source` parameter is `false`, this parameter is ignored., seq_no_primary_term: bool # If `true`, the request returns the sequence number and primary term of the last modification of each hit., q: str # A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.  IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned., size: num # The number of hits to return. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., from: num # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # A comma-separated list of `:` pairs., aggregations: map # Defines the aggregations that are run as part of the search request., collapse: any # Collapses search results the values of the specified field., explain: bool=false # If `true`, the request returns detailed information about score computation as part of a hit., ext: map # Configuration of search extensions defined by Elasticsearch plugins., from: num=0 # The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., highlight: any # Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results., track_total_hits: any=10000 # Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. 
If `false`, the  response does not include the total number of hits matching the query., indices_boost: [map] # Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score., docvalue_fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response., knn: any # The approximate kNN search to run., rank: any # The Reciprocal Rank Fusion (RRF) to use., min_score: num # The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations., post_filter: any # Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results., profile: bool=false # Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution., query: any # The search definition using the Query DSL., rescore: any # Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases., retriever: any # A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`., script_fields: map # Retrieve a script evaluation (based on different fields) for each hit., search_after: any # Used to retrieve the next page of hits using a set of sort values from the previous page., size: num=10 # The number of hits to return, which must not be negative. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property., slice: any # Split a scrolled search into multiple slices that can be consumed independently., sort: any # A comma-separated list of : pairs., _source: any # The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`., fields: [map{field!: any, format: str, include_unmapped: bool}] # An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response., suggest: any # Defines a suggester that provides similar looking terms based on a provided text., terminate_after: num=0 # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers.  If set to `0` (default), the query does not terminate early., timeout: str # The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. 
Defaults to no timeout., track_scores: bool=false # If `true`, calculate and return document scores, even if the scores are not used for sorting., version: bool=false # If `true`, the request returns the document version as part of a hit., seq_no_primary_term: bool # If `true`, the request returns sequence number and primary term of the last modification of each hit., stored_fields: any # A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response., pit: any # Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path., runtime_mappings: any # One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., stats: [str] # The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"query\": {\n    \"term\": {\n      \"user.id\": \"kimchy\"\n    }\n  }\n}"

@endgroup

@group _application
@endpoint GET /_application/search_application/{name}
@desc Get search application details
@required {name: str # The name of the search application}
@returns(200)

@endpoint PUT /_application/search_application/{name}
@desc Create or update a search application
@required {name: str # The name of the search application to be created or updated., indices: [str] # Indices that are part of the Search Application.}
@optional {create: bool # If `true`, this request cannot replace or update existing Search Applications., analytics_collection_name: any # Analytics collection associated to the Search Application., template: any # Search template to use on search operations.}
@returns(200) {result: any}
@example_request "{\n  \"indices\": [ \"index1\", \"index2\" ],\n  \"template\": {\n    \"script\": {\n      \"source\": {\n        \"query\": {\n          \"query_string\": {\n            \"query\": \"{{query_string}}\",\n            \"default_field\": \"{{default_field}}\"\n          }\n        }\n      },\n      \"params\": {\n        \"query_string\": \"*\",\n        \"default_field\": \"*\"\n      }\n    },\n    \"dictionary\": {\n      \"properties\": {\n        \"query_string\": {\n          \"type\": \"string\"\n        },\n        \"default_field\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"title\",\n            \"description\"\n          ]\n        },\n        \"additionalProperties\": false\n      },\n      \"required\": [\n        \"query_string\"\n      ]\n    }\n  }\n}"

@endpoint DELETE /_application/search_application/{name}
@desc Delete a search application
@required {name: str # The name of the search application to delete.}
@returns(200) {acknowledged: bool}

@endpoint GET /_application/analytics/{name}
@desc Get behavioral analytics collections
@required {name: [str] # A list of analytics collections to limit the returned information}
@returns(200)

@endpoint PUT /_application/analytics/{name}
@desc Create a behavioral analytics collection
@required {name: str # The name of the analytics collection to be created or updated.}
@returns(200)

@endpoint DELETE /_application/analytics/{name}
@desc Delete a behavioral analytics collection
@required {name: str # The name of the analytics collection to be deleted}
@returns(200) {acknowledged: bool}

@endpoint GET /_application/analytics
@desc Get behavioral analytics collections
@returns(200)

@endpoint GET /_application/search_application
@desc Get search applications
@optional {q: str # Query in the Lucene query string syntax., from: num # Starting offset., size: num # The maximum number of results to retrieve.}
@returns(200) {count: num, results: [any]}

@endpoint POST /_application/analytics/{collection_name}/event/{event_type}
@desc Create a behavioral analytics collection event
@required {collection_name: str # The name of the behavioral analytics collection., event_type: str # The analytics event type.}
@optional {debug: bool # Whether the response should include more detailed information about the event}
@returns(200) {accepted: bool, event: map}
@example_request "{\n  \"session\": {\n    \"id\": \"1797ca95-91c9-4e2e-b1bd-9c38e6f386a9\"\n  },\n  \"user\": {\n    \"id\": \"5f26f01a-bbee-4202-9298-81261067abbd\"\n  },\n  \"search\":{\n    \"query\": \"search term\",\n    \"results\": {\n      \"items\": [\n        {\n          \"document\": {\n            \"id\": \"123\",\n            \"index\": \"products\"\n          }\n        }\n      ],\n      \"total_results\": 10\n    },\n    \"sort\": {\n      \"name\": \"relevance\"\n    },\n    \"search_application\": \"website\"\n  },\n  \"document\":{\n    \"id\": \"123\",\n    \"index\": \"products\"\n  }\n}"

@endpoint POST /_application/search_application/{name}/_render_query
@desc Render a search application query
@required {name: str # The name of the search application to render the query for.}
@optional {params: map}
@returns(200)
@example_request "{\n  \"params\": {\n    \"query_string\": \"my first query\",\n    \"text_fields\": [\n      {\n        \"name\": \"title\",\n        \"boost\": 5\n      },\n      {\n        \"name\": \"description\",\n        \"boost\": 1\n      }\n    ]\n  }\n}"

@endpoint GET /_application/search_application/{name}/_search
@desc Run a search application search
@required {name: str # The name of the search application to be searched.}
@optional {typed_keys: bool # Determines whether aggregation names are prefixed by their respective types in the response., params: map # Query parameters specific to this request, which will override any defaults specified in the template.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"params\": {\n    \"query_string\": \"my first query\",\n    \"text_fields\": [\n      {\"name\": \"title\", \"boost\": 5},\n      {\"name\": \"description\", \"boost\": 1}\n    ]\n  }\n}"

@endpoint POST /_application/search_application/{name}/_search
@desc Run a search application search
@required {name: str # The name of the search application to be searched.}
@optional {typed_keys: bool # Determines whether aggregation names are prefixed by their respective types in the response., params: map # Query parameters specific to this request, which will override any defaults specified in the template.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"params\": {\n    \"query_string\": \"my first query\",\n    \"text_fields\": [\n      {\"name\": \"title\", \"boost\": 5},\n      {\"name\": \"description\", \"boost\": 1}\n    ]\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_mvt/{field}/{zoom}/{x}/{y}
@desc Search a vector tile
@required {index: any # A list of indices, data streams, or aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. To search a remote cluster, use the `:` syntax., field: str # A field that contains the geospatial data to return. It must be a `geo_point` or `geo_shape` field. The field must have doc values enabled. It cannot be a nested field.  NOTE: Vector tiles do not natively support geometry collections. For `geometrycollection` values in a `geo_shape` field, the API returns a hits layer feature for each element of the collection. This behavior may change in a future release., zoom: num # The zoom level of the vector tile to search. It accepts `0` to `29`., x: num # The X coordinate for the vector tile to search., y: num # The Y coordinate for the vector tile to search.}
@optional {exact_bounds: bool # If `false`, the meta layer's feature is the bounding box of the tile. If true, the meta layer's feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on `<field>` values that intersect the `<zoom>/<x>/<y>` tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile., extent: num # The size, in pixels, of a side of the tile. Vector tiles are square with equal sides., grid_agg: str # Aggregation used to create a grid for `field`., grid_precision: num # Additional zoom levels available through the aggs layer. For example, if `<zoom>` is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer., grid_type: str # Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid', each feature is a Polygon of the cell's bounding box. If 'point', each feature is a Point that is the centroid of the cell., size: num # Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer., track_total_hits: any # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., with_labels: bool # If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features.  * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. 
* The aggregation results will provide one central point for each aggregation bucket.  All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`., aggs: map # Sub-aggregations for the geotile_grid.  It supports the following aggregation types:  - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count`  The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations., buffer: num=5 # The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile., exact_bounds: bool=false # If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on `<field>` values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile., extent: num=4096 # The size, in pixels, of a side of the tile. Vector tiles are square with equal sides., fields: any # The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results., grid_agg: any # The aggregation used to create a grid for the `field`., grid_precision: num=8 # Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer., grid_type: any=grid # Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. 
If `grid`, each feature is a Polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell., query: any # The query DSL used to filter documents for the search., runtime_mappings: any # Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., size: num=10000 # The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer., sort: any # Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest., track_total_hits: any=10000 # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., with_labels: bool # If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features.  * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket.  All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`.}
@returns(200)
@example_request "{\n  \"grid_agg\": \"geotile\",\n  \"grid_precision\": 2,\n  \"fields\": [\n    \"name\",\n    \"price\"\n  ],\n  \"query\": {\n    \"term\": {\n      \"included\": true\n    }\n  },\n  \"aggs\": {\n    \"min_price\": {\n      \"min\": {\n        \"field\": \"price\"\n      }\n    },\n    \"max_price\": {\n      \"max\": {\n        \"field\": \"price\"\n      }\n    },\n    \"avg_price\": {\n      \"avg\": {\n        \"field\": \"price\"\n      }\n    }\n  }\n}"

@endpoint POST /{index}/_mvt/{field}/{zoom}/{x}/{y}
@desc Search a vector tile
@required {index: any # A list of indices, data streams, or aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. To search a remote cluster, use the `:` syntax., field: str # A field that contains the geospatial data to return. It must be a `geo_point` or `geo_shape` field. The field must have doc values enabled. It cannot be a nested field.  NOTE: Vector tiles do not natively support geometry collections. For `geometrycollection` values in a `geo_shape` field, the API returns a hits layer feature for each element of the collection. This behavior may change in a future release., zoom: num # The zoom level of the vector tile to search. It accepts `0` to `29`., x: num # The X coordinate for the vector tile to search., y: num # The Y coordinate for the vector tile to search.}
@optional {exact_bounds: bool # If `false`, the meta layer's feature is the bounding box of the tile. If true, the meta layer's feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on `<field>` values that intersect the `<zoom>/<x>/<y>` tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile., extent: num # The size, in pixels, of a side of the tile. Vector tiles are square with equal sides., grid_agg: str # Aggregation used to create a grid for `field`., grid_precision: num # Additional zoom levels available through the aggs layer. For example, if `<zoom>` is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer., grid_type: str # Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid', each feature is a Polygon of the cell's bounding box. If 'point', each feature is a Point that is the centroid of the cell., size: num # Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer., track_total_hits: any # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., with_labels: bool # If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features.  * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. 
* The aggregation results will provide one central point for each aggregation bucket.  All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`., aggs: map # Sub-aggregations for the geotile_grid.  It supports the following aggregation types:  - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count`  The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations., buffer: num=5 # The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile., exact_bounds: bool=false # If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on `<field>` values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile., extent: num=4096 # The size, in pixels, of a side of the tile. Vector tiles are square with equal sides., fields: any # The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results., grid_agg: any # The aggregation used to create a grid for the `field`., grid_precision: num=8 # Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer., grid_type: any=grid # Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. 
If `grid`, each feature is a Polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell., query: any # The query DSL used to filter documents for the search., runtime_mappings: any # Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name., size: num=10000 # The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer., sort: any # Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest., track_total_hits: any=10000 # The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query., with_labels: bool # If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features.  * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket.  All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`.}
@returns(200)
@example_request "{\n  \"grid_agg\": \"geotile\",\n  \"grid_precision\": 2,\n  \"fields\": [\n    \"name\",\n    \"price\"\n  ],\n  \"query\": {\n    \"term\": {\n      \"included\": true\n    }\n  },\n  \"aggs\": {\n    \"min_price\": {\n      \"min\": {\n        \"field\": \"price\"\n      }\n    },\n    \"max_price\": {\n      \"max\": {\n        \"field\": \"price\"\n      }\n    },\n    \"avg_price\": {\n      \"avg\": {\n        \"field\": \"price\"\n      }\n    }\n  }\n}"

@endgroup

@group _search_shards
@endpoint GET /_search_shards
@desc Get the search shards
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out., preference: str # The node or shard the operation should be performed on. It is random by default., routing: any # A custom value used to route operations to a specific shard.}
@returns(200) {nodes: map, shards: [[map]], indices: map}

@endpoint POST /_search_shards
@desc Get the search shards
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out., preference: str # The node or shard the operation should be performed on. It is random by default., routing: any # A custom value used to route operations to a specific shard.}
@returns(200) {nodes: map, shards: [[map]], indices: map}

@endgroup

@group {index}
@endpoint GET /{index}/_search_shards
@desc Get the search shards
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out., preference: str # The node or shard the operation should be performed on. It is random by default., routing: any # A custom value used to route operations to a specific shard.}
@returns(200) {nodes: map, shards: [[map]], indices: map}

@endpoint POST /{index}/_search_shards
@desc Get the search shards
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., expand_wildcards: any # Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., local: bool # If `true`, the request retrieves information from the local node only., master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out., preference: str # The node or shard the operation should be performed on. It is random by default., routing: any # A custom value used to route operations to a specific shard.}
@returns(200) {nodes: map, shards: [[map]], indices: map}

@endgroup

@group _search
@endpoint GET /_search/template
@desc Run a search with a search template
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response includes additional details about score computation as part of a hit., ignore_throttled: bool # If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., preference: str # The node or shard the operation should be performed on. It is random by default., profile: bool # If `true`, the query execution is profiled., routing: any # A custom value used to route operations to a specific shard., scroll: any # Specifies how long a consistent view of the index should be maintained for scrolled search., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, `hits.total` is rendered as an integer in the response. 
If `false`, it is rendered as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types., explain: bool=false # If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter., id: any # The ID of the search template to use. If no `source` is specified, this parameter is required., params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., profile: bool=false # If `true`, the query execution is profiled., source: any # An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 0,\n    \"size\": 10\n  }\n}"

@endpoint POST /_search/template
@desc Run a search with a search template
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response includes additional details about score computation as part of a hit., ignore_throttled: bool # If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., preference: str # The node or shard the operation should be performed on. It is random by default., profile: bool # If `true`, the query execution is profiled., routing: any # A custom value used to route operations to a specific shard., scroll: any # Specifies how long a consistent view of the index should be maintained for scrolled search., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, `hits.total` is rendered as an integer in the response. 
If `false`, it is rendered as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types., explain: bool=false # If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter., id: any # The ID of the search template to use. If no `source` is specified, this parameter is required., params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., profile: bool=false # If `true`, the query execution is profiled., source: any # An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 0,\n    \"size\": 10\n  }\n}"

@endgroup

@group {index}
@endpoint GET /{index}/_search/template
@desc Run a search with a search template
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response includes additional details about score computation as part of a hit., ignore_throttled: bool # If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., preference: str # The node or shard the operation should be performed on. It is random by default., profile: bool # If `true`, the query execution is profiled., routing: any # A custom value used to route operations to a specific shard., scroll: any # Specifies how long a consistent view of the index should be maintained for scrolled search., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, `hits.total` is rendered as an integer in the response. 
If `false`, it is rendered as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types., explain: bool=false # If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter., id: any # The ID of the search template to use. If no `source` is specified, this parameter is required., params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., profile: bool=false # If `true`, the query execution is profiled., source: any # An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 0,\n    \"size\": 10\n  }\n}"

@endpoint POST /{index}/_search/template
@desc Run a search with a search template
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`).}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ccs_minimize_roundtrips: bool # Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`., explain: bool # If `true`, the response includes additional details about score computation as part of a hit., ignore_throttled: bool # If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., preference: str # The node or shard the operation should be performed on. It is random by default., profile: bool # If `true`, the query execution is profiled., routing: any # A custom value used to route operations to a specific shard., scroll: any # Specifies how long a consistent view of the index should be maintained for scrolled search., search_type: str # The type of the search operation., rest_total_hits_as_int: bool # If `true`, `hits.total` is rendered as an integer in the response. 
If `false`, it is rendered as an object., typed_keys: bool # If `true`, the response prefixes aggregation and suggester names with their respective types., explain: bool=false # If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter., id: any # The ID of the search template to use. If no `source` is specified, this parameter is required., params: map # Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value., profile: bool=false # If `true`, the query execution is profiled., source: any # An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.}
@returns(200) {took: num, timed_out: bool, _shards: any, hits: any, aggregations: map, _clusters: any, fields: map, max_score: num, num_reduce_phases: num, profile: any, pit_id: any, _scroll_id: any, suggest: map, terminated_early: bool}
@example_request "{\n  \"id\": \"my-search-template\",\n  \"params\": {\n    \"query_string\": \"hello world\",\n    \"from\": 0,\n    \"size\": 10\n  }\n}"

@endgroup

@group _searchable_snapshots
@endpoint GET /_searchable_snapshots/cache/stats
@desc Get cache statistics
@returns(200) {nodes: map}

@endpoint GET /_searchable_snapshots/{node_id}/cache/stats
@desc Get cache statistics
@required {node_id: any # The names of the nodes in the cluster to target.}
@returns(200) {nodes: map}

@endpoint POST /_searchable_snapshots/cache/clear
@desc Clear the cache
@optional {expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200)

@endgroup

@group {index}
@endpoint POST /{index}/_searchable_snapshots/cache/clear
@desc Clear the cache
@required {index: any # A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`).}
@optional {expand_wildcards: any # Whether to expand wildcard expression to concrete indices that are open, closed or both, allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored.}
@returns(200)

@endgroup

@group _snapshot
@endpoint POST /_snapshot/{repository}/{snapshot}/_mount
@desc Mount a snapshot
@required {repository: str # The name of the repository containing the snapshot of the index to mount., snapshot: str # The name of the snapshot of the index to mount., index: any # The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., wait_for_completion: bool # If true, the request blocks until the operation is complete., storage: str # The mount option for the searchable snapshot index. For further information on mount options, refer to: [Mount options](https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/searchable-snapshots#searchable-snapshot-mount-storage-options), renamed_index: any # The name of the index that will be created., index_settings: map # The settings that should be added to the index when it is mounted., ignore_index_settings: [str] # The names of settings that should be removed from the index when it is mounted.}
@returns(200) {snapshot: any}
@example_request "{\n  \"index\": \"my_docs\",\n  \"renamed_index\": \"docs\",\n  \"index_settings\": {\n    \"index.number_of_replicas\": 0\n  },\n  \"ignore_index_settings\": [ \"index.refresh_interval\" ]\n}"

@endgroup

@group _searchable_snapshots
@endpoint GET /_searchable_snapshots/stats
@desc Get searchable snapshot statistics
@optional {level: str # Return stats aggregated at cluster, index or shard level}
@returns(200) {stats: map, total: map}

@endgroup

@group {index}
@endpoint GET /{index}/_searchable_snapshots/stats
@desc Get searchable snapshot statistics
@required {index: any # A comma-separated list of data streams and indices to retrieve statistics for.}
@optional {level: str # Return stats aggregated at cluster, index or shard level}
@returns(200) {stats: map, total: map}

@endgroup

@group _security
@endpoint POST /_security/profile/_activate
@desc Activate a user profile
@required {grant_type: any # The type of grant.}
@optional {access_token: str # The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types., password: str # The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types., username: str # The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types.}
@returns(200)
@example_request "{\n  \"grant_type\": \"password\",\n  \"username\" : \"jacknich\",\n  \"password\" : \"l0ng-r4nd0m-p@ssw0rd\"\n}"

@endpoint GET /_security/_authenticate
@desc Authenticate a user
@returns(200) {api_key: any, authentication_realm: any, email: any, full_name: any, lookup_realm: any, metadata: any, roles: [str], username: any, enabled: bool, authentication_type: str, token: any}

@endpoint GET /_security/role
@desc Get roles
@returns(200)

@endpoint POST /_security/role
@desc Bulk create or update roles
@required {roles: map # A dictionary of role name to RoleDescriptor objects to add or update}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {created: [str], updated: [str], noop: [str], errors: any}
@example_request "{\n  \"roles\": {\n      \"my_admin_role\": {\n          \"cluster\": [\n              \"all\"\n          ],\n          \"indices\": [\n              {\n                  \"names\": [\n                      \"index1\",\n                      \"index2\"\n                  ],\n                  \"privileges\": [\n                      \"all\"\n                  ],\n                  \"field_security\": {\n                      \"grant\": [\n                          \"title\",\n                          \"body\"\n                      ]\n                  },\n                  \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n              }\n          ],\n          \"applications\": [\n              {\n                  \"application\": \"myapp\",\n                  \"privileges\": [\n                      \"admin\",\n                      \"read\"\n                  ],\n                  \"resources\": [\n                      \"*\"\n                  ]\n              }\n          ],\n          \"run_as\": [\n              \"other_user\"\n          ],\n          \"metadata\": {\n              \"version\": 1\n          }\n      },\n      \"my_user_role\": {\n          \"cluster\": [\n              \"all\"\n          ],\n          \"indices\": [\n              {\n                  \"names\": [\n                      \"index1\"\n                  ],\n                  \"privileges\": [\n                      \"read\"\n                  ],\n                  \"field_security\": {\n                      \"grant\": [\n                          \"title\",\n                          \"body\"\n                      ]\n                  },\n                  \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\"\n              }\n          ],\n          \"applications\": [\n              {\n                  \"application\": \"myapp\",\n                  \"privileges\": [\n                      \"admin\",\n                      \"read\"\n  
                ],\n                  \"resources\": [\n                      \"*\"\n                  ]\n              }\n          ],\n          \"run_as\": [\n              \"other_user\"\n          ],\n          \"metadata\": {\n              \"version\": 1\n          }\n      }\n  }\n}"

@endpoint DELETE /_security/role
@desc Bulk delete roles
@required {names: [str] # An array of role names to delete}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {deleted: [str], not_found: [str], errors: any}
@example_request "{\n  \"names\": [\"my_admin_role\", \"my_user_role\"]\n}"

@endpoint POST /_security/api_key/_bulk_update
@desc Bulk update API keys
@required {ids: any # The API key identifiers.}
@optional {expiration: any # Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged., metadata: any # Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key., role_descriptors: map # The role descriptors to assign to the API keys. An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API.}
@returns(200) {errors: any, noops: [str], updated: [str]}
@example_request "{\n  \"ids\": [\n    \"VuaCfGcBCdbkQm-e5aOx\",\n    \"H3_AhoIBA9hmeQJdg7ij\"\n  ],\n  \"role_descriptors\": {\n    \"role-a\": {\n      \"indices\": [\n        {\n          \"names\": [\n            \"*\"\n          ],\n          \"privileges\": [\n            \"write\"\n          ]\n        }\n      ]\n    }\n  },\n  \"metadata\": {\n    \"environment\": {\n      \"level\": 2,\n      \"trusted\": true,\n      \"tags\": [\n        \"production\"\n      ]\n    }\n  },\n  \"expiration\": \"30d\"\n}"

@endpoint PUT /_security/user/{username}/_password
@desc Change passwords
@required {username: str # The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., password: any # The new password value. Passwords must be at least 6 characters long., password_hash: str # A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting.}
@returns(200)
@example_request "{\n  \"password\" : \"new-test-password\"\n}"

@endpoint POST /_security/user/{username}/_password
@desc Change passwords
@required {username: str # The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., password: any # The new password value. Passwords must be at least 6 characters long., password_hash: str # A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting.}
@returns(200)
@example_request "{\n  \"password\" : \"new-test-password\"\n}"

@endpoint PUT /_security/user/_password
@desc Change passwords
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., password: any # The new password value. Passwords must be at least 6 characters long., password_hash: str # A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting.}
@returns(200)
@example_request "{\n  \"password\" : \"new-test-password\"\n}"

@endpoint POST /_security/user/_password
@desc Change passwords
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., password: any # The new password value. Passwords must be at least 6 characters long., password_hash: str # A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting.}
@returns(200)
@example_request "{\n  \"password\" : \"new-test-password\"\n}"

@endpoint POST /_security/api_key/{ids}/_clear_cache
@desc Clear the API key cache
@required {ids: any # Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint POST /_security/privilege/{application}/_clear_cache
@desc Clear the privileges cache
@required {application: any # A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint POST /_security/realm/{realms}/_clear_cache
@desc Clear the user cache
@required {realms: any # A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns.}
@optional {usernames: [str] # A comma-separated list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint POST /_security/role/{name}/_clear_cache
@desc Clear the roles cache
@required {name: any # A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint POST /_security/service/{namespace}/{service}/credential/token/{name}/_clear_cache
@desc Clear service account token caches
@required {namespace: str # The namespace, which is a top-level grouping of service accounts., service: str # The name of the service, which must be unique within its namespace., name: any # A comma-separated list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns.}
@returns(200) {_nodes: any, cluster_name: any, nodes: map}

@endpoint GET /_security/api_key
@desc Get API key information
@optional {id: str # An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`., name: str # An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard., owner: bool # A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones., realm_name: str # The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`., username: str # The username of a user. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`., with_limited_by: bool # Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors., active_only: bool # A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys., with_profile_uid: bool # Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists.}
@returns(200) {api_keys: [map]}

@endpoint PUT /_security/api_key
@desc Create an API key
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., expiration: any # The expiration time for the API key. By default, API keys never expire., name: any # A name for the API key., role_descriptors: map # An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API.  NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs., metadata: any # Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.}
@returns(200) {api_key: str, expiration: num, id: any, name: any, encoded: str}
@example_request "{\n  \"name\": \"my-api-key\",\n  \"expiration\": \"1d\",   \n  \"role_descriptors\": { \n    \"role-a\": {\n      \"cluster\": [\"all\"],\n      \"indices\": [\n        {\n          \"names\": [\"index-a*\"],\n          \"privileges\": [\"read\"]\n        }\n      ]\n    },\n    \"role-b\": {\n      \"cluster\": [\"all\"],\n      \"indices\": [\n        {\n          \"names\": [\"index-b*\"],\n          \"privileges\": [\"all\"]\n        }\n      ]\n    }\n  },\n  \"metadata\": {\n    \"application\": \"my-application\",\n    \"environment\": {\n      \"level\": 1,\n      \"trusted\": true,\n      \"tags\": [\"dev\", \"staging\"]\n    }\n  }\n}"

@endpoint POST /_security/api_key
@desc Create an API key
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., expiration: any # The expiration time for the API key. By default, API keys never expire., name: any # A name for the API key., role_descriptors: map # An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API.  NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs., metadata: any # Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.}
@returns(200) {api_key: str, expiration: num, id: any, name: any, encoded: str}
@example_request "{\n  \"name\": \"my-api-key\",\n  \"expiration\": \"1d\",   \n  \"role_descriptors\": { \n    \"role-a\": {\n      \"cluster\": [\"all\"],\n      \"indices\": [\n        {\n          \"names\": [\"index-a*\"],\n          \"privileges\": [\"read\"]\n        }\n      ]\n    },\n    \"role-b\": {\n      \"cluster\": [\"all\"],\n      \"indices\": [\n        {\n          \"names\": [\"index-b*\"],\n          \"privileges\": [\"all\"]\n        }\n      ]\n    }\n  },\n  \"metadata\": {\n    \"application\": \"my-application\",\n    \"environment\": {\n      \"level\": 1,\n      \"trusted\": true,\n      \"tags\": [\"dev\", \"staging\"]\n    }\n  }\n}"

@endpoint DELETE /_security/api_key
@desc Invalidate API keys
@optional {id: any, ids: [str] # A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`., name: any # An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`., owner: bool=false # Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.  NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`., realm_name: str # The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`., username: any # The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`.}
@returns(200) {error_count: num, error_details: [map], invalidated_api_keys: [str], previously_invalidated_api_keys: [str]}
@example_request "{\n  \"ids\" : [ \"VuaCfGcBCdbkQm-e5aOx\" ]\n}"

@endpoint POST /_security/cross_cluster/api_key
@desc Create a cross-cluster API key
@required {access: any # The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified.  NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly., name: any # Specifies the name for this API key.}
@optional {expiration: any # Expiration time for the API key. By default, API keys never expire., metadata: any # Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage., certificate_identity: str # The certificate identity to associate with this API key. This field is used to restrict the API key to connections authenticated by a specific TLS certificate. The value should match the certificate's distinguished name (DN) pattern.}
@returns(200) {api_key: str, expiration: any, id: any, name: any, encoded: str}
@example_request "{\n  \"name\": \"my-cross-cluster-api-key\",\n  \"expiration\": \"1d\",   \n  \"access\": {\n    \"search\": [  \n      {\n        \"names\": [\"logs*\"]\n      }\n    ],\n    \"replication\": [  \n      {\n        \"names\": [\"archive*\"]\n      }\n    ]\n  },\n  \"metadata\": {\n    \"description\": \"phase one\",\n    \"environment\": {\n      \"level\": 1,\n      \"trusted\": true,\n      \"tags\": [\"dev\", \"staging\"]\n    }\n  }\n}"

@endpoint PUT /_security/service/{namespace}/{service}/credential/token/{name}
@desc Create a service account token
@required {namespace: str # The name of the namespace, which is a top-level grouping of service accounts., service: str # The name of the service., name: str # The name for the service account token. If omitted, a random name will be generated.  Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore.  NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `elastic/fleet-server/token1`.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {created: bool, token: any}

@endpoint POST /_security/service/{namespace}/{service}/credential/token/{name}
@desc Create a service account token
@required {namespace: str # The name of the namespace, which is a top-level grouping of service accounts., service: str # The name of the service., name: str # The name for the service account token. If omitted, a random name will be generated.  Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore.  NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `elastic/fleet-server/token1`.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {created: bool, token: any}

@endpoint DELETE /_security/service/{namespace}/{service}/credential/token/{name}
@desc Delete service account tokens
@required {namespace: str # The namespace, which is a top-level grouping of service accounts., service: str # The service name., name: str # The name of the service account token.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {found: bool}

@endpoint POST /_security/service/{namespace}/{service}/credential/token
@desc Create a service account token
@required {namespace: str # The name of the namespace, which is a top-level grouping of service accounts., service: str # The name of the service.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {created: bool, token: any}

@endpoint POST /_security/delegate_pki
@desc Delegate PKI authentication
@required {x509_certificate_chain: [str] # The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding.  The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.}
@returns(200) {access_token: str, expires_in: num, type: str, authentication: any}
@example_request "{\n\"x509_certificate_chain\": [\"MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==\"]\n}"

@endpoint GET /_security/privilege/{application}/{name}
@desc Get application privileges
@required {application: str # The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications., name: any # The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application.}
@returns(200)

@endpoint DELETE /_security/privilege/{application}/{name}
@desc Delete application privileges
@required {application: str # The name of the application. Application privileges are always associated with exactly one application., name: any # The name of the privilege.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)

@endpoint GET /_security/role/{name}
@desc Get roles
@required {name: any # The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about all roles.}
@returns(200)

@endpoint PUT /_security/role/{name}
@desc Create or update roles
@required {name: str # The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., applications: [map{application!: str, privileges!: [str], resources!: [str]}] # A list of application privilege entries., cluster: [any] # A list of cluster privileges. These privileges define the cluster-level actions for users with this role., global: map # An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges., indices: [map{field_security: any, names!: any, privileges!: [any], query: any, allow_restricted_indices: bool}] # A list of indices permissions entries., remote_indices: [map{clusters!: any, field_security: any, names!: any, privileges!: [any], query: any, allow_restricted_indices: bool}] # A list of remote indices permissions entries.  NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model., remote_cluster: [map{clusters!: any, privileges!: [str]}] # A list of remote cluster permissions entries., metadata: any # Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use., run_as: [str] # A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected., description: str # Optional description of the role descriptor, transient_metadata: map # Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. 
When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.}
@returns(200) {role: any}
@example_request "{\n  \"description\": \"Grants full access to all management features within the cluster.\",\n  \"cluster\": [\"all\"],\n  \"indices\": [\n    {\n      \"names\": [ \"index1\", \"index2\" ],\n      \"privileges\": [\"all\"],\n      \"field_security\" : { // optional\n        \"grant\" : [ \"title\", \"body\" ]\n      },\n      \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\" // optional\n    }\n  ],\n  \"applications\": [\n    {\n      \"application\": \"myapp\",\n      \"privileges\": [ \"admin\", \"read\" ],\n      \"resources\": [ \"*\" ]\n    }\n  ],\n  \"run_as\": [ \"other_user\" ], // optional\n  \"metadata\" : { // optional\n    \"version\" : 1\n  }\n}"

@endpoint POST /_security/role/{name}
@desc Create or update roles
@required {name: str # The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., applications: [map{application!: str, privileges!: [str], resources!: [str]}] # A list of application privilege entries., cluster: [any] # A list of cluster privileges. These privileges define the cluster-level actions for users with this role., global: map # An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges., indices: [map{field_security: any, names!: any, privileges!: [any], query: any, allow_restricted_indices: bool}] # A list of indices permissions entries., remote_indices: [map{clusters!: any, field_security: any, names!: any, privileges!: [any], query: any, allow_restricted_indices: bool}] # A list of remote indices permissions entries.  NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model., remote_cluster: [map{clusters!: any, privileges!: [str]}] # A list of remote cluster permissions entries., metadata: any # Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use., run_as: [str] # A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected., description: str # Optional description of the role descriptor, transient_metadata: map # Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. 
When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.}
@returns(200) {role: any}
@example_request "{\n  \"description\": \"Grants full access to all management features within the cluster.\",\n  \"cluster\": [\"all\"],\n  \"indices\": [\n    {\n      \"names\": [ \"index1\", \"index2\" ],\n      \"privileges\": [\"all\"],\n      \"field_security\" : { // optional\n        \"grant\" : [ \"title\", \"body\" ]\n      },\n      \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\" // optional\n    }\n  ],\n  \"applications\": [\n    {\n      \"application\": \"myapp\",\n      \"privileges\": [ \"admin\", \"read\" ],\n      \"resources\": [ \"*\" ]\n    }\n  ],\n  \"run_as\": [ \"other_user\" ], // optional\n  \"metadata\" : { // optional\n    \"version\" : 1\n  }\n}"

@endpoint DELETE /_security/role/{name}
@desc Delete roles
@required {name: str # The name of the role.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {found: bool}

@endpoint GET /_security/role_mapping/{name}
@desc Get role mappings
@required {name: any # The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings.}
@returns(200)

@endpoint PUT /_security/role_mapping/{name}
@desc Create or update role mappings
@required {name: str # The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., enabled: bool # Mappings that have `enabled` set to `false` are ignored when role mapping is performed., metadata: any # Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage., roles: [str] # A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified., role_templates: [map{format: any, template!: any}] # A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified., rules: any # The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL., run_as: [str]}
@returns(200) {created: bool, role_mapping: any}
@example_request "{\n  \"roles\": [ \"user\"],\n  \"enabled\": true, \n  \"rules\": {\n    \"field\" : { \"username\" : \"*\" }\n  },\n  \"metadata\" : { \n    \"version\" : 1\n  }\n}"

@endpoint POST /_security/role_mapping/{name}
@desc Create or update role mappings
@required {name: str # The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., enabled: bool # Mappings that have `enabled` set to `false` are ignored when role mapping is performed., metadata: any # Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage., roles: [str] # A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified., role_templates: [map{format: any, template!: any}] # A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified., rules: any # The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL., run_as: [str]}
@returns(200) {created: bool, role_mapping: any}
@example_request "{\n  \"roles\": [ \"user\"],\n  \"enabled\": true, \n  \"rules\": {\n    \"field\" : { \"username\" : \"*\" }\n  },\n  \"metadata\" : { \n    \"version\" : 1\n  }\n}"

@endpoint DELETE /_security/role_mapping/{name}
@desc Delete role mappings
@required {name: str # The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {found: bool}

@endpoint GET /_security/user/{username}
@desc Get users
@required {username: any # An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users.}
@optional {with_profile_uid: bool # Determines whether to retrieve the user profile UID, if it exists, for the users.}
@returns(200)

@endpoint PUT /_security/user/{username}
@desc Create or update users
@required {username: str # An identifier for the user.  NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed.}
@optional {refresh: str # Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true., username: any, email: any # The email of the user., full_name: any # The full name of the user., metadata: any # Arbitrary metadata that you want to associate with the user., password: any # The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password, password_hash: str # A hash of the user's password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request., roles: [str] # A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`)., enabled: bool=true # Specifies whether the user is enabled.}
@returns(200) {created: bool}
@example_request "{\n  \"password\" : \"l0ng-r4nd0m-p@ssw0rd\",\n  \"roles\" : [ \"admin\", \"other_role1\" ],\n  \"full_name\" : \"Jack Nicholson\",\n  \"email\" : \"jacknich@example.com\",\n  \"metadata\" : {\n    \"intelligence\" : 7\n  }\n}"

@endpoint POST /_security/user/{username}
@desc Create or update users
@required {username: str # An identifier for the user.  NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed.}
@optional {refresh: str # Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true., username: any, email: any # The email of the user., full_name: any # The full name of the user., metadata: any # Arbitrary metadata that you want to associate with the user., password: any # The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password, password_hash: str # A hash of the user's password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request., roles: [str] # A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`)., enabled: bool=true # Specifies whether the user is enabled.}
@returns(200) {created: bool}
@example_request "{\n  \"password\" : \"l0ng-r4nd0m-p@ssw0rd\",\n  \"roles\" : [ \"admin\", \"other_role1\" ],\n  \"full_name\" : \"Jack Nicholson\",\n  \"email\" : \"jacknich@example.com\",\n  \"metadata\" : {\n    \"intelligence\" : 7\n  }\n}"

@endpoint DELETE /_security/user/{username}
@desc Delete users
@required {username: str # An identifier for the user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200) {found: bool}

@endpoint PUT /_security/user/{username}/_disable
@desc Disable users
@required {username: str # An identifier for the user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)

@endpoint POST /_security/user/{username}/_disable
@desc Disable users
@required {username: str # An identifier for the user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)

@endpoint PUT /_security/profile/{uid}/_disable
@desc Disable a user profile
@required {uid: str # Unique identifier for the user profile.}
@optional {refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.}
@returns(200) {acknowledged: bool}

@endpoint POST /_security/profile/{uid}/_disable
@desc Disable a user profile
@required {uid: str # Unique identifier for the user profile.}
@optional {refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.}
@returns(200) {acknowledged: bool}

@endpoint PUT /_security/user/{username}/_enable
@desc Enable users
@required {username: str # An identifier for the user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)

@endpoint POST /_security/user/{username}/_enable
@desc Enable users
@required {username: str # An identifier for the user.}
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)

@endpoint PUT /_security/profile/{uid}/_enable
@desc Enable a user profile
@required {uid: str # A unique identifier for the user profile.}
@optional {refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes.}
@returns(200) {acknowledged: bool}

@endpoint POST /_security/profile/{uid}/_enable
@desc Enable a user profile
@required {uid: str # A unique identifier for the user profile.}
@optional {refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes.}
@returns(200) {acknowledged: bool}

@endpoint GET /_security/enroll/kibana
@desc Enroll Kibana
@returns(200) {token: any, http_ca: str}

@endpoint GET /_security/enroll/node
@desc Enroll a node
@returns(200) {http_ca_key: str, http_ca_cert: str, transport_ca_cert: str, transport_key: str, transport_cert: str, nodes_addresses: [str]}

@endpoint GET /_security/privilege/_builtin
@desc Get builtin privileges
@returns(200) {cluster: [any], index: [str], remote_cluster: [str]}

@endpoint GET /_security/privilege
@desc Get application privileges
@returns(200)

@endpoint PUT /_security/privilege
@desc Create or update application privileges
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)
@example_request "{\n  \"myapp\": {\n    \"read\": {\n      \"actions\": [ \n        \"data:read/*\" , \n        \"action:login\" ],\n        \"metadata\": { \n          \"description\": \"Read access to myapp\"\n        }\n      }\n    }\n}"

@endpoint POST /_security/privilege
@desc Create or update application privileges
@optional {refresh: str # If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.}
@returns(200)
@example_request "{\n  \"myapp\": {\n    \"read\": {\n      \"actions\": [ \n        \"data:read/*\" , \n        \"action:login\" ],\n        \"metadata\": { \n          \"description\": \"Read access to myapp\"\n        }\n      }\n    }\n}"

@endpoint GET /_security/privilege/{application}
@desc Get application privileges
@required {application: str # The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications.}
@returns(200)

@endpoint GET /_security/role_mapping
@desc Get role mappings
@returns(200)

@endpoint GET /_security/service/{namespace}/{service}
@desc Get service accounts
@required {namespace: str # The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter., service: str # The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`.}
@returns(200)

@endpoint GET /_security/service/{namespace}
@desc Get service accounts
@required {namespace: str # The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter.}
@returns(200)

@endpoint GET /_security/service
@desc Get service accounts
@returns(200)

@endpoint GET /_security/service/{namespace}/{service}/credential
@desc Get service account credentials
@required {namespace: str # The name of the namespace., service: str # The service name.}
@returns(200) {service_account: str, count: num, tokens: map, nodes_credentials: any}

@endpoint GET /_security/settings
@desc Get security index settings
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {security: any, security-profile: any, security-tokens: any}

@endpoint PUT /_security/settings
@desc Update security index settings
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., security: any # Settings for the index used for most security configuration, including native realm users and roles configured with the API., security-profile: any # Settings for the index used to store profile information., security-tokens: any # Settings for the index used to store tokens.}
@returns(200) {acknowledged: bool}
@example_request "{\n    \"security\": {\n        \"index.auto_expand_replicas\": \"0-all\"\n    },\n    \"security-tokens\": {\n        \"index.auto_expand_replicas\": \"0-all\"\n    },\n    \"security-profile\": {\n        \"index.auto_expand_replicas\": \"0-all\"\n    }\n}"

@endpoint GET /_security/stats
@desc Get security stats
@returns(200) {nodes: map}

@endpoint POST /_security/oauth2/token
@desc Get a token
@optional {grant_type: any # The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`., scope: str # The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request., password: any # The user's password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type., kerberos_ticket: str # The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type., refresh_token: str # The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type., username: any # The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type.}
@returns(200) {access_token: str, expires_in: num, scope: str, type: str, refresh_token: str, kerberos_authentication_response_token: str, authentication: any}
@example_request "{\n  \"grant_type\" : \"client_credentials\"\n}"

@endpoint DELETE /_security/oauth2/token
@desc Invalidate a token
@optional {token: str # An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used., refresh_token: str # A refresh token. This parameter cannot be used if any of `token`, `realm_name`, or `username` are used., realm_name: any # The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`., username: any # The username of a user. This parameter cannot be used with either `refresh_token` or `token`.}
@returns(200) {error_count: num, error_details: [map], invalidated_tokens: num, previously_invalidated_tokens: num}
@example_request "{\n  \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\"\n}"

@endpoint GET /_security/user
@desc Get users
@optional {with_profile_uid: bool # Determines whether to retrieve the user profile UID, if it exists, for the users.}
@returns(200)

@endpoint GET /_security/user/_privileges
@desc Get user privileges
@returns(200) {applications: [map], cluster: [str], remote_cluster: [map], global: [map], indices: [map], remote_indices: [map], run_as: [str]}

@endpoint GET /_security/profile/{uid}
@desc Get a user profile
@required {uid: any # A unique identifier for the user profile.}
@optional {data: any # A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=<key>` to retrieve content nested under the specified `<key>`. By default returns no `data` content.}
@returns(200) {profiles: [any], errors: any}

@endpoint POST /_security/api_key/grant
@desc Grant an API key
@required {api_key: any # The API key., grant_type: any # The type of grant. Supported grant types are: `access_token`, `password`.}
@optional {refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes., access_token: str # The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types., username: any # The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types., password: any # The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types., run_as: any # The name of the user to be impersonated.}
@returns(200) {api_key: str, id: any, name: any, expiration: any, encoded: str}
@example_request "{\n  \"grant_type\": \"password\",\n  \"username\" : \"test_admin\",\n  \"password\" : \"x-pack-test-password\",\n  \"api_key\" : {\n    \"name\": \"my-api-key\",\n    \"expiration\": \"1d\",\n    \"role_descriptors\": {\n      \"role-a\": {\n        \"cluster\": [\"all\"],\n        \"indices\": [\n          {\n          \"names\": [\"index-a*\"],\n          \"privileges\": [\"read\"]\n          }\n        ]\n      },\n      \"role-b\": {\n        \"cluster\": [\"all\"],\n        \"indices\": [\n          {\n          \"names\": [\"index-b*\"],\n          \"privileges\": [\"all\"]\n          }\n        ]\n      }\n    },\n    \"metadata\": {\n      \"application\": \"my-application\",\n      \"environment\": {\n        \"level\": 1,\n        \"trusted\": true,\n        \"tags\": [\"dev\", \"staging\"]\n      }\n    }\n  }\n}"

@endpoint GET /_security/user/_has_privileges
@desc Check user privileges
@optional {application: [map{application!: str, privileges!: [str], resources!: [str]}], cluster: [any] # A list of the cluster privileges that you want to check., index: [map{names!: any, privileges!: [any], allow_restricted_indices: bool}]}
@returns(200) {application: any, cluster: map, has_all_requested: bool, index: map, username: any}
@example_request "{\n  \"cluster\": [ \"monitor\", \"manage\" ],\n  \"index\" : [\n    {\n      \"names\": [ \"suppliers\", \"products\" ],\n      \"privileges\": [ \"read\" ]\n    },\n    {\n      \"names\": [ \"inventory\" ],\n      \"privileges\" : [ \"read\", \"write\" ]\n    }\n  ],\n  \"application\": [\n    {\n      \"application\": \"inventory_manager\",\n      \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n      \"resources\" : [ \"product/1852563\" ]\n    }\n  ]\n}"

@endpoint POST /_security/user/_has_privileges
@desc Check user privileges
@optional {application: [map{application!: str, privileges!: [str], resources!: [str]}], cluster: [any] # A list of the cluster privileges that you want to check., index: [map{names!: any, privileges!: [any], allow_restricted_indices: bool}]}
@returns(200) {application: any, cluster: map, has_all_requested: bool, index: map, username: any}
@example_request "{\n  \"cluster\": [ \"monitor\", \"manage\" ],\n  \"index\" : [\n    {\n      \"names\": [ \"suppliers\", \"products\" ],\n      \"privileges\": [ \"read\" ]\n    },\n    {\n      \"names\": [ \"inventory\" ],\n      \"privileges\" : [ \"read\", \"write\" ]\n    }\n  ],\n  \"application\": [\n    {\n      \"application\": \"inventory_manager\",\n      \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n      \"resources\" : [ \"product/1852563\" ]\n    }\n  ]\n}"

@endpoint GET /_security/user/{user}/_has_privileges
@desc Check user privileges
@required {user: str # Username}
@optional {application: [map{application!: str, privileges!: [str], resources!: [str]}], cluster: [any] # A list of the cluster privileges that you want to check., index: [map{names!: any, privileges!: [any], allow_restricted_indices: bool}]}
@returns(200) {application: any, cluster: map, has_all_requested: bool, index: map, username: any}
@example_request "{\n  \"cluster\": [ \"monitor\", \"manage\" ],\n  \"index\" : [\n    {\n      \"names\": [ \"suppliers\", \"products\" ],\n      \"privileges\": [ \"read\" ]\n    },\n    {\n      \"names\": [ \"inventory\" ],\n      \"privileges\" : [ \"read\", \"write\" ]\n    }\n  ],\n  \"application\": [\n    {\n      \"application\": \"inventory_manager\",\n      \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n      \"resources\" : [ \"product/1852563\" ]\n    }\n  ]\n}"

@endpoint POST /_security/user/{user}/_has_privileges
@desc Check user privileges
@required {user: str # Username}
@optional {application: [map{application!: str, privileges!: [str], resources!: [str]}], cluster: [any] # A list of the cluster privileges that you want to check., index: [map{names!: any, privileges!: [any], allow_restricted_indices: bool}]}
@returns(200) {application: any, cluster: map, has_all_requested: bool, index: map, username: any}
@example_request "{\n  \"cluster\": [ \"monitor\", \"manage\" ],\n  \"index\" : [\n    {\n      \"names\": [ \"suppliers\", \"products\" ],\n      \"privileges\": [ \"read\" ]\n    },\n    {\n      \"names\": [ \"inventory\" ],\n      \"privileges\" : [ \"read\", \"write\" ]\n    }\n  ],\n  \"application\": [\n    {\n      \"application\": \"inventory_manager\",\n      \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n      \"resources\" : [ \"product/1852563\" ]\n    }\n  ]\n}"

@endpoint GET /_security/profile/_has_privileges
@desc Check user profile privileges
@required {uids: [str] # A list of profile IDs. The privileges are checked for associated users of the profiles., privileges: any # An object containing all the privileges to be checked.}
@returns(200) {has_privilege_uids: [str], errors: any}
@example_request "{\n  \"uids\": [\n    \"u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0\",\n    \"u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1\",\n    \"u_does-not-exist_0\"\n  ],\n  \"privileges\": {\n    \"cluster\": [ \"monitor\", \"create_snapshot\", \"manage_ml\" ],\n    \"index\" : [\n      {\n        \"names\": [ \"suppliers\", \"products\" ],\n        \"privileges\": [ \"create_doc\"]\n      },\n      {\n        \"names\": [ \"inventory\" ],\n        \"privileges\" : [ \"read\", \"write\" ]\n      }\n    ],\n    \"application\": [\n      {\n        \"application\": \"inventory_manager\",\n        \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n        \"resources\" : [ \"product/1852563\" ]\n      }\n    ]\n  }\n}"

@endpoint POST /_security/profile/_has_privileges
@desc Check user profile privileges
@required {uids: [str] # A list of profile IDs. The privileges are checked for associated users of the profiles., privileges: any # An object containing all the privileges to be checked.}
@returns(200) {has_privilege_uids: [str], errors: any}
@example_request "{\n  \"uids\": [\n    \"u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0\",\n    \"u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1\",\n    \"u_does-not-exist_0\"\n  ],\n  \"privileges\": {\n    \"cluster\": [ \"monitor\", \"create_snapshot\", \"manage_ml\" ],\n    \"index\" : [\n      {\n        \"names\": [ \"suppliers\", \"products\" ],\n        \"privileges\": [ \"create_doc\"]\n      },\n      {\n        \"names\": [ \"inventory\" ],\n        \"privileges\" : [ \"read\", \"write\" ]\n      }\n    ],\n    \"application\": [\n      {\n        \"application\": \"inventory_manager\",\n        \"privileges\" : [ \"read\", \"data:write/inventory\" ],\n        \"resources\" : [ \"product/1852563\" ]\n      }\n    ]\n  }\n}"

@endpoint POST /_security/oidc/authenticate
@desc Authenticate OpenID Connect
@required {nonce: str # Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call., redirect_uri: str # The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider., state: str # Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.}
@optional {realm: str # The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined.}
@returns(200) {access_token: str, expires_in: num, refresh_token: str, type: str}
@example_request "{\n  \"redirect_uri\" : \"https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n  \"state\" : \"4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I\",\n  \"nonce\" : \"WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM\",\n  \"realm\" : \"oidc1\"\n}"

@endpoint POST /_security/oidc/logout
@desc Logout of OpenID Connect
@required {token: str # The access token to be invalidated.}
@optional {refresh_token: str # The refresh token to be invalidated.}
@returns(200) {redirect: str}
@example_request "{\n  \"token\" : \"dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==\",\n  \"refresh_token\": \"vLBPvmAB6KvwvJZr27cS\"\n}"

@endpoint POST /_security/oidc/prepare
@desc Prepare OpenID connect authentication
@optional {iss: str # In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when *realm* is specified. One of *realm* or *iss* is required., login_hint: str # In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. This parameter is not valid when *realm* is specified., nonce: str # The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response., realm: str # The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when *iss* is specified. One of *realm* or *iss* is required., state: str # The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.}
@returns(200) {nonce: str, realm: str, redirect: str, state: str}
@example_request "{\n  \"realm\" : \"oidc1\"\n}"

@endpoint GET /_security/_query/api_key
@desc Find API keys with a query
@optional {with_limited_by: bool # Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges., with_profile_uid: bool # Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key., typed_keys: bool # Determines whether aggregation names are prefixed by their respective types in the response., aggregations: map # Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with., query: any # A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`.  NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. 
Such a match query is hence equivalent to a `term` query., from: num=0 # The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order., size: num=10 # The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., search_after: any # The search after definition.}
@returns(200) {total: num, count: num, api_keys: [map], aggregations: map}
@example_request "{\n  \"query\": {\n    \"ids\": {\n      \"values\": [\n        \"VuaCfGcBCdbkQm-e5aOx\"\n      ]\n    }\n  }\n}"

@endpoint POST /_security/_query/api_key
@desc Find API keys with a query
@optional {with_limited_by: bool # Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges., with_profile_uid: bool # Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key., typed_keys: bool # Determines whether aggregation names are prefixed by their respective types in the response., aggregations: map # Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with., query: any # A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`.  NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. 
Such a match query is hence equivalent to a `term` query., from: num=0 # The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order., size: num=10 # The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., search_after: any # The search after definition.}
@returns(200) {total: num, count: num, api_keys: [map], aggregations: map}
@example_request "{\n  \"query\": {\n    \"ids\": {\n      \"values\": [\n        \"VuaCfGcBCdbkQm-e5aOx\"\n      ]\n    }\n  }\n}"

@endpoint GET /_security/_query/role
@desc Find roles with a query
@optional {query: any # A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`., from: num=0 # The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # The sort definition. You can sort on `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. In addition, sort can also be applied to the `_doc` field to sort by index order., size: num=10 # The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., search_after: any # The search after definition.}
@returns(200) {total: num, count: num, roles: [any]}
@example_request "{\n    \"sort\": [\"name\"]\n}"

@endpoint POST /_security/_query/role
@desc Find roles with a query
@optional {query: any # A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`., from: num=0 # The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # The sort definition. You can sort on `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. In addition, sort can also be applied to the `_doc` field to sort by index order., size: num=10 # The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., search_after: any # The search after definition.}
@returns(200) {total: num, count: num, roles: [any]}
@example_request "{\n    \"sort\": [\"name\"]\n}"

@endpoint GET /_security/_query/user
@desc Find users with a query
@optional {with_profile_uid: bool # Determines whether to retrieve the user profile UID, if it exists, for the users., query: any # A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`., from: num=0 # The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order., size: num=10 # The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., search_after: any # The search after definition}
@returns(200) {total: num, count: num, users: [any]}
@example_request "{\n    \"query\": {\n        \"prefix\": {\n            \"roles\": \"other\"\n        }\n    }\n}"

@endpoint POST /_security/_query/user
@desc Find users with a query
@optional {with_profile_uid: bool # Determines whether to retrieve the user profile UID, if it exists, for the users., query: any # A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`., from: num=0 # The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., sort: any # The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order., size: num=10 # The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter., search_after: any # The search after definition}
@returns(200) {total: num, count: num, users: [any]}
@example_request "{\n    \"query\": {\n        \"prefix\": {\n            \"roles\": \"other\"\n        }\n    }\n}"

@endpoint POST /_security/saml/authenticate
@desc Authenticate SAML
@required {content: str # The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document., ids: any # A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.}
@optional {realm: str # The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined.}
@returns(200) {access_token: str, username: str, expires_in: num, refresh_token: str, realm: str, in_response_to: str}
@example_request "{\n  \"content\" : \"PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....\",\n  \"ids\" : [\"4fee3b046395c4e751011e97f8900b5273d56685\"]\n}"

@endpoint POST /_security/saml/complete_logout
@desc Logout of SAML completely
@required {realm: str # The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response., ids: any # A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.}
@optional {query_string: str # If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI., content: str # If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response.}
@returns(200)
@example_request "{\n  \"realm\": \"saml1\",\n  \"ids\": [ \"_1c368075e0b3...\" ],\n  \"query_string\": \"SAMLResponse=fZHLasMwEEVbfb1bf...&SigAlg=http%3A%2F%2Fwww.w3.org%2F2000%2F09%2Fxmldsig%23rsa-sha1&Signature=CuCmFn%2BLqnaZGZJqK...\"\n}"

@endpoint POST /_security/saml/invalidate
@desc Invalidate SAML
@required {query_string: str # The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way.}
@optional {acs: str # The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter., realm: str # The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the `acs` parameter.}
@returns(200) {invalidated: num, realm: str, redirect: str}
@example_request "{\n  \"query_string\" : \"SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D\",\n  \"realm\" : \"saml1\"\n}"

@endpoint POST /_security/saml/logout
@desc Logout of SAML
@required {token: str # The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.}
@optional {refresh_token: str # The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token.}
@returns(200) {redirect: str}
@example_request "{\n  \"token\" : \"46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3\",\n  \"refresh_token\" : \"mJdXLtmvTUSpoLwMvdBt_w\"\n}"

@endpoint POST /_security/saml/prepare
@desc Prepare SAML authentication
@optional {acs: str # The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter., realm: str # The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter., relay_state: str # A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation.}
@returns(200) {id: any, realm: str, redirect: str}
@example_request "{\n  \"realm\" : \"saml1\"\n}"

@endpoint GET /_security/saml/metadata/{realm_name}
@desc Create SAML service provider metadata
@required {realm_name: str # The name of the SAML realm in Elasticsearch.}
@returns(200) {metadata: str}

@endpoint GET /_security/profile/_suggest
@desc Suggest a user profile
@optional {data: any # A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field., name: str # A query string used to match name-related fields in user profile documents. Name-related fields are the user's `username`, `full_name`, and `email`., size: num=10 # The number of profiles to return., hint: any # Extra search criteria to improve relevance of the suggestion result. Profiles matching the specified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.}
@returns(200) {total: any, took: num, profiles: [map]}
@example_request "{\n  \"name\": \"jack\",  \n  \"hint\": {\n    \"uids\": [  \n      \"u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0\",\n      \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\"\n    ],\n    \"labels\": {\n      \"direction\": [\"north\", \"east\"]  \n    }\n  }\n}"

@endpoint POST /_security/profile/_suggest
@desc Suggest a user profile
@optional {data: any # A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field., name: str # A query string used to match name-related fields in user profile documents. Name-related fields are the user's `username`, `full_name`, and `email`., size: num=10 # The number of profiles to return., hint: any # Extra search criteria to improve relevance of the suggestion result. Profiles matching the specified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.}
@returns(200) {total: any, took: num, profiles: [map]}
@example_request "{\n  \"name\": \"jack\",  \n  \"hint\": {\n    \"uids\": [  \n      \"u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0\",\n      \"u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0\"\n    ],\n    \"labels\": {\n      \"direction\": [\"north\", \"east\"]  \n    }\n  }\n}"

@endpoint PUT /_security/api_key/{id}
@desc Update an API key
@required {id: str # The ID of the API key to update.}
@optional {role_descriptors: map # The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API., metadata: any # Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key., expiration: any # The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged.}
@returns(200) {updated: bool}
@example_request "{\n  \"role_descriptors\": {\n    \"role-a\": {\n      \"indices\": [\n        {\n          \"names\": [\"*\"],\n          \"privileges\": [\"write\"]\n        }\n      ]\n    }\n  },\n  \"metadata\": {\n    \"environment\": {\n      \"level\": 2,\n      \"trusted\": true,\n      \"tags\": [\"production\"]\n    }\n  }\n}"

@endpoint PUT /_security/cross_cluster/api_key/{id}
@desc Update a cross-cluster API key
@required {id: str # The ID of the cross-cluster API key to update., access: any # The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access.}
@optional {expiration: any # The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged., metadata: any # Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key., certificate_identity: str # The certificate identity to associate with this API key. This field is used to restrict the API key to connections authenticated by a specific TLS certificate. The value should match the certificate's distinguished name (DN) pattern. When specified, this fully replaces any previously assigned certificate identity. To clear an existing certificate identity, explicitly set this field to `null`. When omitted, the existing certificate identity remains unchanged.}
@returns(200) {updated: bool}
@example_request "{\n  \"access\": {\n    \"replication\": [\n      {\n        \"names\": [\"archive\"]\n      }\n    ]\n  },\n  \"metadata\": {\n    \"application\": \"replication\"\n  }\n}"

@endpoint PUT /_security/profile/{uid}/_data
@desc Update user profile data
@required {uid: str # A unique identifier for the user profile.}
@optional {if_seq_no: num # Only perform the operation if the document has this sequence number., if_primary_term: num # Only perform the operation if the document has this primary term., refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes., labels: map # Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`)., data: map # Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"labels\": {\n    \"direction\": \"east\"\n  },\n  \"data\": {\n    \"app1\": {\n      \"theme\": \"default\"\n    }\n  }\n}"

@endpoint POST /_security/profile/{uid}/_data
@desc Update user profile data
@required {uid: str # A unique identifier for the user profile.}
@optional {if_seq_no: num # Only perform the operation if the document has this sequence number., if_primary_term: num # Only perform the operation if the document has this primary term., refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes., labels: map # Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`)., data: map # Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"labels\": {\n    \"direction\": \"east\"\n  },\n  \"data\": {\n    \"app1\": {\n      \"theme\": \"default\"\n    }\n  }\n}"

@endgroup

@group _ingest
@endpoint GET /_ingest/_simulate
@desc Simulate data ingestion
@required {docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {pipeline: str # The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index., merge_type: str # The mapping merge type if mapping overrides are being provided in mapping_addition. The allowed values are one of index or template. The index option merges mappings the way they would be merged into an existing index. The template option merges mappings the way they would be merged into a template., component_template_substitutions: map # A map of component template names to substitute component template definition objects., index_template_substitutions: map # A map of index template names to substitute index template definition objects., mapping_addition: any, pipeline_substitutions: map # Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"123\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_id\": \"456\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endpoint POST /_ingest/_simulate
@desc Simulate data ingestion
@required {docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {pipeline: str # The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index., merge_type: str # The mapping merge type if mapping overrides are being provided in mapping_addition. The allowed values are one of index or template. The index option merges mappings the way they would be merged into an existing index. The template option merges mappings the way they would be merged into a template., component_template_substitutions: map # A map of component template names to substitute component template definition objects., index_template_substitutions: map # A map of index template names to substitute index template definition objects., mapping_addition: any, pipeline_substitutions: map # Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"123\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_id\": \"456\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endpoint GET /_ingest/{index}/_simulate
@desc Simulate data ingestion
@required {index: str # The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument., docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {pipeline: str # The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index., merge_type: str # The mapping merge type if mapping overrides are being provided in mapping_addition. The allowed values are one of index or template. The index option merges mappings the way they would be merged into an existing index. The template option merges mappings the way they would be merged into a template., component_template_substitutions: map # A map of component template names to substitute component template definition objects., index_template_substitutions: map # A map of index template names to substitute index template definition objects., mapping_addition: any, pipeline_substitutions: map # Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"123\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_id\": \"456\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endpoint POST /_ingest/{index}/_simulate
@desc Simulate data ingestion
@required {index: str # The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument., docs: [map{_id: any, _index: any, _source!: map}] # Sample documents to test in the pipeline.}
@optional {pipeline: str # The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index., merge_type: str # The mapping merge type if mapping overrides are being provided in mapping_addition. The allowed values are one of index or template. The index option merges mappings the way they would be merged into an existing index. The template option merges mappings the way they would be merged into a template., component_template_substitutions: map # A map of component template names to substitute component template definition objects., index_template_substitutions: map # A map of index template names to substitute index template definition objects., mapping_addition: any, pipeline_substitutions: map # Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter.}
@returns(200) {docs: [map]}
@example_request "{\n  \"docs\": [\n    {\n      \"_id\": \"123\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"bar\"\n      }\n    },\n    {\n      \"_id\": \"456\",\n      \"_index\": \"my-index\",\n      \"_source\": {\n        \"foo\": \"rab\"\n      }\n    }\n  ]\n}"

@endgroup

@group _slm
@endpoint GET /_slm/policy/{policy_id}
@desc Get policy information
@required {policy_id: any # A comma-separated list of snapshot lifecycle policy identifiers.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint PUT /_slm/policy/{policy_id}
@desc Create or update a policy
@required {policy_id: str # The identifier for the snapshot lifecycle policy you want to create or update.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., config: any # Configuration for each snapshot created by the policy., name: any # Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name., repository: str # Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API., retention: any # Retention rules used to retain and delete snapshots created by the policy., schedule: any # Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"schedule\": \"0 30 1 * * ?\",\n  \"name\": \"<daily-snap-{now/d}>\",\n  \"repository\": \"my_repository\",\n  \"config\": {\n    \"indices\": [\"data-*\", \"important\"],\n    \"ignore_unavailable\": false,\n    \"include_global_state\": false\n  },\n  \"retention\": {\n    \"expire_after\": \"30d\",\n    \"min_count\": 5,\n    \"max_count\": 50\n  }\n}"

@endpoint DELETE /_slm/policy/{policy_id}
@desc Delete a policy
@required {policy_id: str # The id of the snapshot lifecycle policy to remove}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint PUT /_slm/policy/{policy_id}/_execute
@desc Run a policy
@required {policy_id: str # The id of the snapshot lifecycle policy to be executed}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {snapshot_name: any}

@endpoint POST /_slm/_execute_retention
@desc Run a retention policy
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_slm/policy
@desc Get policy information
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200)

@endpoint GET /_slm/stats
@desc Get snapshot lifecycle management statistics
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {retention_deletion_time: any, retention_deletion_time_millis: any, retention_failed: num, retention_runs: num, retention_timed_out: num, total_snapshots_deleted: num, total_snapshot_deletion_failures: num, total_snapshots_failed: num, total_snapshots_taken: num, policy_stats: [map]}

@endpoint GET /_slm/status
@desc Get the snapshot lifecycle management status
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {operation_mode: any}

@endpoint POST /_slm/start
@desc Start snapshot lifecycle management
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {acknowledged: bool}

@endpoint POST /_slm/stop
@desc Stop snapshot lifecycle management
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {acknowledged: bool}

@endgroup

@group _snapshot
@endpoint POST /_snapshot/{repository}/_cleanup
@desc Clean up the snapshot repository
@required {repository: str # The name of the snapshot repository to clean up.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`, timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {results: any}

@endpoint PUT /_snapshot/{repository}/{snapshot}/_clone/{target_snapshot}
@desc Clone a snapshot
@required {repository: str # The name of the snapshot repository that both source and target snapshot belong to., snapshot: str # The source snapshot name., target_snapshot: str # The target snapshot name., indices: str # A comma-separated list of indices to include in the snapshot. Multi-target syntax is supported.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"indices\": \"index_a,index_b\"\n}"

@endpoint GET /_snapshot/{repository}/{snapshot}
@desc Get snapshot information
@required {repository: str # A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported., snapshot: any # A comma-separated list of snapshot names to retrieve. Wildcards (`*`) are supported.  * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * To get information about any snapshots that are currently running, use `_current`.}
@optional {after: str # An offset identifier to start pagination from as returned by the next field in the response body., from_sort_value: str # The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count., ignore_unavailable: bool # If `false`, the request returns an error for any snapshots that are unavailable., index_details: bool # If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted., index_names: bool # If `true`, the response includes the name of each index in each snapshot., include_repository: bool # If `true`, the response includes the repository name in each snapshot., master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., order: str # The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order., offset: num # Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0., size: num # The maximum number of snapshots to return. The default is -1, which means to return all that match the request without limit., slm_policy_filter: str # Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to.  You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. 
For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy., sort: str # The sort order for the result. The default behavior is sorting by snapshot start time stamp., state: any # Only return snapshots with a state found in the given comma-separated list of snapshot states. The default is all snapshot states., verbose: bool # If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted.  NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined.}
@returns(200) {remaining: num, total: num, next: str, responses: [map], snapshots: [map]}

@endpoint PUT /_snapshot/{repository}/{snapshot}
@desc Create a snapshot
@required {repository: str # The name of the repository for the snapshot., snapshot: str # The name of the snapshot. It supports date math. It must be unique in the repository.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., wait_for_completion: bool # If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes., expand_wildcards: any=all # Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`., feature_states: [str] # The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API.  If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default.  Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`)., ignore_unavailable: bool=false # If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed., include_global_state: bool=true # If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`)., indices: any # A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. 
To exclude all data streams and indices, use `-*`.  You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead., metadata: any # Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch., partial: bool=false # If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty.  If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.}
@returns(200) {accepted: bool, snapshot: any}
@example_request "{\n  \"indices\": \"index_1,index_2\",\n  \"ignore_unavailable\": true,\n  \"include_global_state\": false,\n  \"metadata\": {\n    \"taken_by\": \"user123\",\n    \"taken_because\": \"backup before upgrading\"\n  }\n}"

@endpoint POST /_snapshot/{repository}/{snapshot}
@desc Create a snapshot
@required {repository: str # The name of the repository for the snapshot., snapshot: str # The name of the snapshot. It supports date math. It must be unique in the repository.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., wait_for_completion: bool # If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes., expand_wildcards: any=all # Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`., feature_states: [str] # The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API.  If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default.  Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`)., ignore_unavailable: bool=false # If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed., include_global_state: bool=true # If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`)., indices: any # A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. 
To exclude all data streams and indices, use `-*`.  You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead., metadata: any # Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch., partial: bool=false # If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty.  If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.}
@returns(200) {accepted: bool, snapshot: any}
@example_request "{\n  \"indices\": \"index_1,index_2\",\n  \"ignore_unavailable\": true,\n  \"include_global_state\": false,\n  \"metadata\": {\n    \"taken_by\": \"user123\",\n    \"taken_because\": \"backup before upgrading\"\n  }\n}"

@endpoint DELETE /_snapshot/{repository}/{snapshot}
@desc Delete snapshots
@required {repository: str # The name of the repository to delete a snapshot from., snapshot: any # A comma-separated list of snapshot names to delete. It also accepts wildcards (`*`).}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., wait_for_completion: bool # If `true`, the request returns a response when the matching snapshots are all deleted. If `false`, the request returns a response as soon as the deletes are scheduled.}
@returns(200) {acknowledged: bool}

@endpoint GET /_snapshot/{repository}
@desc Get snapshot repository information
@required {repository: any # A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`.  To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`.}
@optional {local: bool # If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200)

@endpoint PUT /_snapshot/{repository}
@desc Create or update a snapshot repository
@required {repository: str # The name of the snapshot repository to register or update.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`., verify: bool # If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"type\": \"fs\",\n  \"settings\": {\n    \"location\": \"my_backup_location\"\n  }\n}"

@endpoint POST /_snapshot/{repository}
@desc Create or update a snapshot repository
@required {repository: str # The name of the snapshot repository to register or update.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`., verify: bool # If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"type\": \"fs\",\n  \"settings\": {\n    \"location\": \"my_backup_location\"\n  }\n}"

@endpoint DELETE /_snapshot/{repository}
@desc Delete snapshot repositories
@required {repository: any # The name of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {acknowledged: bool}

@endpoint GET /_snapshot
@desc Get snapshot repository information
@optional {local: bool # If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200)

@endpoint POST /_snapshot/{repository}/_analyze
@desc Analyze a snapshot repository
@required {repository: str # The name of the repository.}
@optional {blob_count: num # The total number of blobs to write to the repository during the test. For realistic experiments, set this parameter to at least `2000`., concurrency: num # The number of operations to run concurrently during the test. For realistic experiments, leave this parameter unset., detailed: bool # Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis., early_read_node_count: num # The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. For realistic experiments, leave this parameter unset., max_blob_size: any # The maximum size of a blob to be written during the test. For realistic experiments, set this parameter to at least `2gb`., max_total_data_size: any # An upper limit on the total size of all the blobs written during the test. For realistic experiments, set this parameter to at least `1tb`., rare_action_probability: num # The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. For realistic experiments, leave this parameter unset., rarely_abort_writes: bool # Indicates whether to rarely cancel writes before they complete. For realistic experiments, leave this parameter unset., read_node_count: num # The number of nodes on which to read a blob after writing. For realistic experiments, leave this parameter unset., register_operation_count: num # The minimum number of linearizable register operations to perform in total. For realistic experiments, set this parameter to at least `100`., seed: num # The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. 
Note that the operations are performed concurrently so might not always happen in the same order on each run. For realistic experiments, leave this parameter unset., timeout: any # The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. For realistic experiments, set this parameter sufficiently long to allow the test to complete.}
@returns(200) {blob_count: num, blob_path: str, concurrency: num, coordinating_node: any, delete_elapsed: any, delete_elapsed_nanos: any, details: any, early_read_node_count: num, issues_detected: [str], listing_elapsed: any, listing_elapsed_nanos: any, max_blob_size: any, max_blob_size_bytes: num, max_total_data_size: any, max_total_data_size_bytes: num, rare_action_probability: num, read_node_count: num, repository: str, seed: num, summary: any}

@endpoint POST /_snapshot/{repository}/_verify_integrity
@desc Verify the repository integrity
@required {repository: any # The name of the snapshot repository.}
@optional {blob_thread_pool_concurrency: num # If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once., index_snapshot_verification_concurrency: num # The maximum number of index snapshots to verify concurrently within each index verification., index_verification_concurrency: num # The number of indices to verify concurrently. The default behavior is to use the entire `snapshot_meta` thread pool., max_bytes_per_sec: str # If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second., max_failed_shard_snapshots: num # The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. If your repository contains more than this number of shard snapshot failures, the verification will fail., meta_thread_pool_concurrency: num # The maximum number of snapshot metadata operations to run concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once., snapshot_verification_concurrency: num # The number of snapshots to verify concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once., verify_blob_contents: bool # Indicates whether to verify the checksum of every data blob in the repository. If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive.}
@returns(200)

@endpoint POST /_snapshot/{repository}/{snapshot}/_restore
@desc Restore a snapshot
@required {repository: str # The name of the repository to restore a snapshot from., snapshot: str # The name of the snapshot to restore.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., wait_for_completion: bool # If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail.  If `false`, the request returns a response when the restore operation initializes., feature_states: [str] # The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`)., ignore_index_settings: [str] # The index settings to not restore from the snapshot. You can't use this option to ignore `index.number_of_shards`.  For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template., ignore_unavailable: bool=false # If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. If `false`, the request returns an error for any missing index or data stream., include_aliases: bool=true # If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases., include_global_state: bool=false # If `true`, restore the cluster state. 
The cluster state includes:  * Persistent cluster settings * Index templates * Legacy index templates * Ingest pipelines * Index lifecycle management (ILM) policies * Stored scripts * For snapshots taken after 7.12.0, feature states  If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot.  Use the `feature_states` parameter to configure how feature states are restored.  If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail., index_settings: any # Index settings to add or change in restored indices, including backing indices. You can't use this option to change `index.number_of_shards`.  For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template., indices: any # A comma-separated list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot.  You can't use this parameter to restore system indices or system data streams. Use `feature_states` instead., partial: bool=false # If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.  If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty., rename_pattern: str # A rename pattern to apply to restored data streams and indices. 
Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`.  The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic., rename_replacement: str # The rename replacement string that is used with the `rename_pattern`.}
@returns(200) {accepted: bool, snapshot: any}
@example_request "{\n  \"indices\": \"index_1,index_2\",\n  \"ignore_unavailable\": true,\n  \"include_global_state\": false,\n  \"rename_pattern\": \"index_(.+)\",\n  \"rename_replacement\": \"restored_index_$1\",\n  \"include_aliases\": false\n}"

@endpoint GET /_snapshot/_status
@desc Get the snapshot status
@optional {ignore_unavailable: bool # If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {snapshots: [map]}

@endpoint GET /_snapshot/{repository}/_status
@desc Get the snapshot status
@required {repository: str # The snapshot repository name used to limit the request. It supports wildcards (`*`) if `<snapshot>` isn't specified.}
@optional {ignore_unavailable: bool # If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {snapshots: [map]}

@endpoint GET /_snapshot/{repository}/{snapshot}/_status
@desc Get the snapshot status
@required {repository: str # The snapshot repository name used to limit the request. It supports wildcards (`*`) if `<snapshot>` isn't specified., snapshot: any # A comma-separated list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported.}
@optional {ignore_unavailable: bool # If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned., master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {snapshots: [map]}

@endpoint POST /_snapshot/{repository}/_verify
@desc Verify a snapshot repository
@required {repository: str # The name of the snapshot repository to verify.}
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`., timeout: any # The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {nodes: map}

@endgroup

@group _sql
@endpoint POST /_sql/close
@desc Clear an SQL search cursor
@required {cursor: str # Cursor to clear.}
@returns(200) {succeeded: bool}
@example_request "{\n  \"cursor\": \"sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=\"\n}"

@endpoint DELETE /_sql/async/delete/{id}
@desc Delete an async SQL search
@required {id: str # The identifier for the search.}
@returns(200) {acknowledged: bool}

@endpoint GET /_sql/async/{id}
@desc Get async SQL search results
@required {id: str # The identifier for the search.}
@optional {delimiter: str # The separator for CSV results. The API supports this parameter only for CSV responses., format: str # The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter., keep_alive: any # The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search., wait_for_completion_timeout: any # The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results.}
@returns(200) {id: any, is_running: bool, is_partial: bool, columns: [map], cursor: str, rows: [[map]]}

@endpoint GET /_sql/async/status/{id}
@desc Get the async SQL search status
@required {id: str # The identifier for the search.}
@returns(200) {expiration_time_in_millis: any, id: str, is_running: bool, is_partial: bool, start_time_in_millis: any, completion_status: any}

@endpoint GET /_sql
@desc Get SQL search results
@optional {format: str # The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence., allow_partial_search_results: bool=false # If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results., catalog: str # The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only., columnar: bool=false # If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses., cursor: str # The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters., fetch_size: num=1000 # The maximum number of rows (or entries) to return in one response., field_multi_value_leniency: bool=false # If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results., filter: any=none # The Elasticsearch query DSL for additional filtering., index_using_frozen: bool=false # If `true`, the search can run on frozen indices., keep_alive: any=5d # The retention period for an async or saved synchronous search., keep_on_completion: bool=false # If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`., page_timeout: any=45s # The minimum retention period for the scroll cursor. 
After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request., params: [map] # The values for parameters in the query., query: str # The SQL query to run., request_timeout: any=90s # The timeout before the request fails., runtime_mappings: any # One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name., time_zone: any=Z # The ISO-8601 time zone ID for the search., wait_for_completion_timeout: any # The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async.  To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter.}
@returns(200) {columns: [map], cursor: str, id: any, is_running: bool, is_partial: bool, rows: [[map]]}
@example_request "{\n  \"query\": \"SELECT * FROM library ORDER BY page_count DESC LIMIT 5\"\n}"

@endpoint POST /_sql
@desc Get SQL search results
@optional {format: str # The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence., allow_partial_search_results: bool=false # If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results., catalog: str # The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only., columnar: bool=false # If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses., cursor: str # The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters., fetch_size: num=1000 # The maximum number of rows (or entries) to return in one response., field_multi_value_leniency: bool=false # If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results., filter: any=none # The Elasticsearch query DSL for additional filtering., index_using_frozen: bool=false # If `true`, the search can run on frozen indices., keep_alive: any=5d # The retention period for an async or saved synchronous search., keep_on_completion: bool=false # If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`., page_timeout: any=45s # The minimum retention period for the scroll cursor. 
After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request., params: [map] # The values for parameters in the query., query: str # The SQL query to run., request_timeout: any=90s # The timeout before the request fails., runtime_mappings: any # One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name., time_zone: any=Z # The ISO-8601 time zone ID for the search., wait_for_completion_timeout: any # The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async.  To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter.}
@returns(200) {columns: [map], cursor: str, id: any, is_running: bool, is_partial: bool, rows: [[map]]}
@example_request "{\n  \"query\": \"SELECT * FROM library ORDER BY page_count DESC LIMIT 5\"\n}"

@endpoint GET /_sql/translate
@desc Translate SQL into Elasticsearch queries
@required {query: str # The SQL query to run.}
@optional {fetch_size: num=1000 # The maximum number of rows (or entries) to return in one response., filter: any=none # The Elasticsearch query DSL for additional filtering., time_zone: any=Z # The ISO-8601 time zone ID for the search.}
@returns(200) {aggregations: map, size: num, _source: any, fields: [map], query: any, sort: any, track_total_hits: any}
@example_request "{\n  \"query\": \"SELECT * FROM library ORDER BY page_count DESC\",\n  \"fetch_size\": 10\n}"

@endpoint POST /_sql/translate
@desc Translate SQL into Elasticsearch queries
@required {query: str # The SQL query to run.}
@optional {fetch_size: num=1000 # The maximum number of rows (or entries) to return in one response., filter: any=none # The Elasticsearch query DSL for additional filtering., time_zone: any=Z # The ISO-8601 time zone ID for the search.}
@returns(200) {aggregations: map, size: num, _source: any, fields: [map], query: any, sort: any, track_total_hits: any}
@example_request "{\n  \"query\": \"SELECT * FROM library ORDER BY page_count DESC\",\n  \"fetch_size\": 10\n}"

@endgroup

@group _ssl
@endpoint GET /_ssl/certificates
@desc Get SSL certificates
@returns(200)

@endgroup

@group _streams
@endpoint POST /_streams/{name}/_disable
@desc Disable a named stream
@required {name: str # The stream type to disable.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint POST /_streams/{name}/_enable
@desc Enable a named stream
@required {name: str # The stream type to enable.}
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_streams/status
@desc Get the status of streams
@optional {master_timeout: any # Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {logs: any, logs.otel: any, logs.ecs: any}

@endgroup

@group _synonyms
@endpoint GET /_synonyms/{id}
@desc Get a synonym set
@required {id: str # The synonyms set identifier to retrieve.}
@optional {from: num # The starting offset for synonym rules to retrieve., size: num # The max number of synonym rules to retrieve.}
@returns(200) {count: num, synonyms_set: [map]}

@endpoint PUT /_synonyms/{id}
@desc Create or update a synonym set
@required {id: str # The ID of the synonyms set to be created or updated., synonyms_set: any # The synonym rules definitions for the synonyms set.}
@optional {refresh: bool # If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym set}
@returns(200) {result: any, reload_analyzers_details: any}
@example_request "\n{\n  \"synonyms_set\": {\n  \"synonyms\" : \"hello, hi, howdy\"\n  }\n}"

@endpoint DELETE /_synonyms/{id}
@desc Delete a synonym set
@required {id: str # The synonyms set identifier to delete.}
@returns(200) {acknowledged: bool}

@endpoint GET /_synonyms/{set_id}/{rule_id}
@desc Get a synonym rule
@required {set_id: str # The ID of the synonym set to retrieve the synonym rule from., rule_id: str # The ID of the synonym rule to retrieve.}
@returns(200) {id: any, synonyms: any}

@endpoint PUT /_synonyms/{set_id}/{rule_id}
@desc Create or update a synonym rule
@required {set_id: str # The ID of the synonym set., rule_id: str # The ID of the synonym rule to be updated or created., synonyms: any # The synonym rule information definition, which must be in Solr format.}
@optional {refresh: bool # If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym rule}
@returns(200) {result: any, reload_analyzers_details: any}
@example_request "{\n  \"synonyms\": \"hello, hi, howdy\"\n}"

@endpoint DELETE /_synonyms/{set_id}/{rule_id}
@desc Delete a synonym rule
@required {set_id: str # The ID of the synonym set to update., rule_id: str # The ID of the synonym rule to delete.}
@optional {refresh: bool # If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the deleted synonym rule}
@returns(200) {result: any, reload_analyzers_details: any}

@endpoint GET /_synonyms
@desc Get all synonym sets
@optional {from: num # The starting offset for synonyms sets to retrieve., size: num # The maximum number of synonyms sets to retrieve.}
@returns(200) {count: num, results: [map]}

@endgroup

@group _tasks
@endpoint POST /_tasks/_cancel
@desc Cancel a task
@optional {actions: any # A comma-separated list or wildcard expression of actions that is used to limit the request., nodes: [str] # A comma-separated list of node IDs or names that is used to limit the request., parent_task_id: str # A parent task ID that is used to limit the tasks., wait_for_completion: bool # If true, the request blocks until all found tasks are complete.}
@returns(200) {node_failures: [map], task_failures: [map], nodes: map, tasks: any}

@endpoint POST /_tasks/{task_id}/_cancel
@desc Cancel a task
@required {task_id: str # The task identifier.}
@optional {actions: any # A comma-separated list or wildcard expression of actions that is used to limit the request., nodes: [str] # A comma-separated list of node IDs or names that is used to limit the request., parent_task_id: str # A parent task ID that is used to limit the tasks., wait_for_completion: bool # If true, the request blocks until all found tasks are complete.}
@returns(200) {node_failures: [map], task_failures: [map], nodes: map, tasks: any}

@endpoint GET /_tasks/{task_id}
@desc Get task information
@required {task_id: str # The task identifier.}
@optional {timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., wait_for_completion: bool # If `true`, the request blocks until the task has completed.}
@returns(200) {completed: bool, task: any, response: map, error: any}

@endpoint GET /_tasks
@desc Get all tasks
@optional {actions: any # A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluster:*` to retrieve all cluster-related tasks., detailed: bool # If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run., group_by: str # A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks., nodes: any # A comma-separated list of node IDs or names that is used to limit the returned information., parent_task_id: str # A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code., timeout: any # The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property., wait_for_completion: bool # If `true`, the request blocks until the operation is complete.}
@returns(200) {node_failures: [map], task_failures: [map], nodes: map, tasks: any}

@endgroup

@group {index}
@endpoint GET /{index}/_terms_enum
@desc Get terms in an index
@required {index: any # A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*`  or `_all`., field: any # The string to match at the start of indexed terms. If not provided, all terms in the field are considered.}
@optional {size: num=10 # The number of matching terms to return., timeout: any=1s # The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty., case_insensitive: bool=false # When `true`, the provided search string is matched against index terms without case sensitivity., index_filter: any # Filter an index shard if the provided query rewrites to `match_none`., string: str # The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered.  > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766., search_after: str # The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.}
@returns(200) {_shards: any, terms: [str], complete: bool}
@example_request "{\n    \"field\" : \"tags\",\n    \"string\" : \"kiba\"\n}"

@endpoint POST /{index}/_terms_enum
@desc Get terms in an index
@required {index: any # A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*`  or `_all`., field: any # The string to match at the start of indexed terms. If not provided, all terms in the field are considered.}
@optional {size: num=10 # The number of matching terms to return., timeout: any=1s # The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty., case_insensitive: bool=false # When `true`, the provided search string is matched against index terms without case sensitivity., index_filter: any # Filter an index shard if the provided query rewrites to `match_none`., string: str # The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered.  > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766., search_after: str # The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.}
@returns(200) {_shards: any, terms: [str], complete: bool}
@example_request "{\n    \"field\" : \"tags\",\n    \"string\" : \"kiba\"\n}"

@endpoint GET /{index}/_termvectors/{id}
@desc Get term vector information
@required {index: str # The name of the index that contains the document., id: str # A unique identifier for the document.}
@optional {fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value that is used to route operations to a specific shard., term_statistics: bool # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., doc: map # An artificial document (a document not present in the index) for which you want to retrieve term vectors., filter: any # Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query., per_field_analyzer: map # Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. 
When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated., fields: [str] # A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool=true # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool=true # If `true`, the response includes term offsets., payloads: bool=true # If `true`, the response includes term payloads., positions: bool=true # If `true`, the response includes term positions., term_statistics: bool=false # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., routing: any # A custom value that is used to route operations to a specific shard., version: any # If `true`, returns the document version as part of a hit., version_type: any # The version type.}
@returns(200) {found: bool, _id: any, _index: any, term_vectors: map, took: num, _version: any}
@example_request "{\n  \"fields\" : [\"text\"],\n  \"offsets\" : true,\n  \"payloads\" : true,\n  \"positions\" : true,\n  \"term_statistics\" : true,\n  \"field_statistics\" : true\n}"

@endpoint POST /{index}/_termvectors/{id}
@desc Get term vector information
@required {index: str # The name of the index that contains the document., id: str # A unique identifier for the document.}
@optional {fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value that is used to route operations to a specific shard., term_statistics: bool # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., doc: map # An artificial document (a document not present in the index) for which you want to retrieve term vectors., filter: any # Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query., per_field_analyzer: map # Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. 
When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated., fields: [str] # A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool=true # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool=true # If `true`, the response includes term offsets., payloads: bool=true # If `true`, the response includes term payloads., positions: bool=true # If `true`, the response includes term positions., term_statistics: bool=false # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., routing: any # A custom value that is used to route operations to a specific shard., version: any # If `true`, returns the document version as part of a hit., version_type: any # The version type.}
@returns(200) {found: bool, _id: any, _index: any, term_vectors: map, took: num, _version: any}
@example_request "{\n  \"fields\" : [\"text\"],\n  \"offsets\" : true,\n  \"payloads\" : true,\n  \"positions\" : true,\n  \"term_statistics\" : true,\n  \"field_statistics\" : true\n}"

@endpoint GET /{index}/_termvectors
@desc Get term vector information
@required {index: str # The name of the index that contains the document.}
@optional {fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value that is used to route operations to a specific shard., term_statistics: bool # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., doc: map # An artificial document (a document not present in the index) for which you want to retrieve term vectors., filter: any # Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query., per_field_analyzer: map # Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. 
When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated., fields: [str] # A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool=true # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool=true # If `true`, the response includes term offsets., payloads: bool=true # If `true`, the response includes term payloads., positions: bool=true # If `true`, the response includes term positions., term_statistics: bool=false # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., routing: any # A custom value that is used to route operations to a specific shard., version: any # If `true`, returns the document version as part of a hit., version_type: any # The version type.}
@returns(200) {found: bool, _id: any, _index: any, term_vectors: map, took: num, _version: any}
@example_request "{\n  \"fields\" : [\"text\"],\n  \"offsets\" : true,\n  \"payloads\" : true,\n  \"positions\" : true,\n  \"term_statistics\" : true,\n  \"field_statistics\" : true\n}"

@endpoint POST /{index}/_termvectors
@desc Get term vector information
@required {index: str # The name of the index that contains the document.}
@optional {fields: any # A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool # If `true`, the response includes term offsets., payloads: bool # If `true`, the response includes term payloads., positions: bool # If `true`, the response includes term positions., preference: str # The node or shard the operation should be performed on. It is random by default., realtime: bool # If true, the request is real-time as opposed to near-real-time., routing: any # A custom value that is used to route operations to a specific shard., term_statistics: bool # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., version: num # If `true`, returns the document version as part of a hit., version_type: str # The version type., doc: map # An artificial document (a document not present in the index) for which you want to retrieve term vectors., filter: any # Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query., per_field_analyzer: map # Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. 
When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated., fields: [str] # A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters., field_statistics: bool=true # If `true`, the response includes:  * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field)., offsets: bool=true # If `true`, the response includes term offsets., payloads: bool=true # If `true`, the response includes term payloads., positions: bool=true # If `true`, the response includes term positions., term_statistics: bool=false # If `true`, the response includes:  * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term).  By default these values are not returned since term statistics can have a serious performance impact., routing: any # A custom value that is used to route operations to a specific shard., version: any # If `true`, returns the document version as part of a hit., version_type: any # The version type.}
@returns(200) {found: bool, _id: any, _index: any, term_vectors: map, took: num, _version: any}
@example_request "{\n  \"fields\" : [\"text\"],\n  \"offsets\" : true,\n  \"payloads\" : true,\n  \"positions\" : true,\n  \"term_statistics\" : true,\n  \"field_statistics\" : true\n}"

@endgroup

@group _text_structure
@endpoint GET /_text_structure/find_field_structure
@desc Find the structure of a text field
@required {field: str # The field that should be analyzed., index: str # The name of the index that contains the analyzed field.}
@optional {column_names: any # If `format` is set to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example., delimiter: str # If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row., documents_to_sample: num # The number of documents to include in the structural analysis. The minimum value is 2., ecs_compatibility: str # The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them., explain: bool # If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result., format: str # The high level structure of the text. By default, the API chooses the format. 
In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row., grok_pattern: str # If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern., quote: str # If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample., should_trim_fields: bool # If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`., should_parse_recursively: bool # If the format is `ndjson`, you can specify whether to parse nested JSON objects recursively. The nested objects are parsed to a maximum depth equal to the default value of the `index.mapping.depth.limit` setting. Anything beyond that depth is parsed as an `object` type field. For formats other than `ndjson`, this parameter is ignored., timeout: any # The maximum amount of time that the structure analysis can take. 
If the analysis is still running when the timeout expires, it will be stopped., timestamp_field: str # The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.  If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.  For structured text, if you specify this parameter, the field must exist within the text.  If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text., timestamp_format: str # The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported:  * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz`  Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.  One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.  
If this parameter is not specified, the structure finder chooses the best format from a built-in set.  If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.}
@returns(200) {charset: str, ecs_compatibility: any, field_stats: map, format: any, grok_pattern: any, java_timestamp_formats: [str], joda_timestamp_formats: [str], ingest_pipeline: any, mappings: any, multiline_start_pattern: str, need_client_timezone: bool, num_lines_analyzed: num, num_messages_analyzed: num, sample_start: str, timestamp_field: any}

@endpoint GET /_text_structure/find_message_structure
@desc Find the structure of text messages
@required {messages: [str] # The list of messages you want to analyze.}
@optional {column_names: any # If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example., delimiter: str # If the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row., ecs_compatibility: str # The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using them., explain: bool # If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result., format: str # The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. 
If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row., grok_pattern: str # If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern., quote: str # If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample., should_trim_fields: bool # If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`., should_parse_recursively: bool # If the format is `ndjson`, you can specify whether to parse nested JSON objects recursively. The nested objects are parsed to a maximum depth equal to the default value of the `index.mapping.depth.limit` setting. Anything beyond that depth is parsed as an `object` type field. For formats other than `ndjson`, this parameter is ignored., timeout: any # The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped., timestamp_field: str # The name of the field that contains the primary timestamp of each record in the text. 
In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.  If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.  For structured text, if you specify this parameter, the field must exist within the text.  If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text., timestamp_format: str # The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported:  * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz`  Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.  One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.  If this parameter is not specified, the structure finder chooses the best format from a built-in set.  If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. 
When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.}
@returns(200) {charset: str, ecs_compatibility: any, field_stats: map, format: any, grok_pattern: any, java_timestamp_formats: [str], joda_timestamp_formats: [str], ingest_pipeline: any, mappings: any, multiline_start_pattern: str, need_client_timezone: bool, num_lines_analyzed: num, num_messages_analyzed: num, sample_start: str, timestamp_field: any}
@example_request "{\n  \"messages\": [\n    \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\",\n    \"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [repository-url]\",\n    \"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [rest-root]\",\n    \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-core]\",\n    \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-redact]\",\n    \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [ingest-user-agent]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-monitoring]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [repository-s3]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-analytics]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-ent-search]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-autoscaling]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [lang-painless]]\",\n    \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [lang-expression]\",\n    \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-eql]\",\n    \"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment    ] [laptop] heap size [16gb], compressed ordinary object pointers [true]\",\n    \"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security         ] [laptop] Security is enabled\",\n    \"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin  ] [laptop] Profiling is enabled\",\n    \"[2024-03-05T10:52:47,259][INFO 
][o.e.x.p.ProfilingPlugin  ] [laptop] profiling index templates will not be installed or reinstalled\",\n    \"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]\",\n    \"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule    ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]\",\n    \"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node               ] [laptop] initialized\",\n    \"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node               ] [laptop] starting ...\"\n  ]\n}"

@endpoint POST /_text_structure/find_message_structure
@desc Find the structure of text messages
@required {messages: [str] # The list of messages you want to analyze.}
@optional {column_names: any # If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example., delimiter: str # If the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row., ecs_compatibility: str # The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings can rename these fields before using it., explain: bool # If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result., format: str # The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. 
If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row., grok_pattern: str # If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern., quote: str # If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample., should_trim_fields: bool # If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`., should_parse_recursively: bool # If the format is `ndjson`, you can specify whether to parse nested JSON objects recursively. The nested objects are parsed to a maximum depth equal to the default value of the `index.mapping.depth.limit` setting. Anything beyond that depth is parsed as an `object` type field. For formats other than `ndjson`, this parameter is ignored., timeout: any # The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped., timestamp_field: str # The name of the field that contains the primary timestamp of each record in the text. 
In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.  If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.  For structured text, if you specify this parameter, the field must exist within the text.  If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text., timestamp_format: str # The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported:  * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz`  Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.  One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.  If this parameter is not specified, the structure finder chooses the best format from a built-in set.  If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. 
When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.}
@returns(200) {charset: str, ecs_compatibility: any, field_stats: map, format: any, grok_pattern: any, java_timestamp_formats: [str], joda_timestamp_formats: [str], ingest_pipeline: any, mappings: any, multiline_start_pattern: str, need_client_timezone: bool, num_lines_analyzed: num, num_messages_analyzed: num, sample_start: str, timestamp_field: any}
@example_request "{\n  \"messages\": [\n    \"[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128\",\n    \"[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [repository-url]\",\n    \"[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [rest-root]\",\n    \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-core]\",\n    \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-redact]\",\n    \"[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [ingest-user-agent]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-monitoring]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [repository-s3]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-analytics]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-ent-search]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-autoscaling]\",\n    \"[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [lang-painless]]\",\n    \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [lang-expression]\",\n    \"[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService     ] [laptop] loaded module [x-pack-eql]\",\n    \"[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment    ] [laptop] heap size [16gb], compressed ordinary object pointers [true]\",\n    \"[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security         ] [laptop] Security is enabled\",\n    \"[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin  ] [laptop] Profiling is enabled\",\n    \"[2024-03-05T10:52:47,259][INFO 
][o.e.x.p.ProfilingPlugin  ] [laptop] profiling index templates will not be installed or reinstalled\",\n    \"[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]\",\n    \"[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule    ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]\",\n    \"[2024-03-05T10:52:49,188][INFO ][o.e.n.Node               ] [laptop] initialized\",\n    \"[2024-03-05T10:52:49,199][INFO ][o.e.n.Node               ] [laptop] starting ...\"\n  ]\n}"

@endpoint POST /_text_structure/find_structure
@desc Find the structure of a text file
@optional {charset: str # The text's character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set., column_names: any # If you have set `format` to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example., delimiter: str # If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row., ecs_compatibility: str # The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. 
If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings can rename these fields before using it., explain: bool # If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen., format: str # The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row., grok_pattern: str # If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern., has_header_row: bool # If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. 
If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows., line_merge_size_limit: num # The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected., lines_to_sample: num # The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.  NOTE: The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety., quote: str # If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample., should_trim_fields: bool # If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. 
Otherwise, the default value is `false`., should_parse_recursively: bool # If the format is `ndjson`, you can specify whether to parse nested JSON objects recursively. The nested objects are parsed to a maximum depth equal to the default value of the `index.mapping.depth.limit` setting. Anything beyond that depth is parsed as an `object` type field. For formats other than `ndjson`, this parameter is ignored., timeout: any # The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped., timestamp_field: str # The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.  If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.  For structured text, if you specify this parameter, the field must exist within the text.  If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text., timestamp_format: str # The Java time format of the timestamp field in the text.  Only a subset of Java time format letter groups are supported:  * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz`  Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. 
For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.  One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.  If this parameter is not specified, the structure finder chooses the best format from a built-in set.  If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages.}
@returns(200) {charset: str, has_header_row: bool, has_byte_order_marker: bool, format: str, field_stats: map, sample_start: str, num_messages_analyzed: num, mappings: any, quote: str, delimiter: str, need_client_timezone: bool, num_lines_analyzed: num, column_names: [str], explanation: [str], grok_pattern: any, multiline_start_pattern: str, exclude_lines_pattern: str, java_timestamp_formats: [str], joda_timestamp_formats: [str], timestamp_field: any, should_trim_fields: bool, ingest_pipeline: any}
@example_request "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\", \"release_date\": \"2011-06-02\", \"page_count\": 561}\n{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\", \"release_date\": \"1989-05-26\", \"page_count\": 482}\n{\"name\": \"Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1965-06-01\", \"page_count\": 604}\n{\"name\": \"Dune Messiah\", \"author\": \"Frank Herbert\", \"release_date\": \"1969-10-15\", \"page_count\": 331}\n{\"name\": \"Children of Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1976-04-21\", \"page_count\": 408}\n{\"name\": \"God Emperor of Dune\", \"author\": \"Frank Herbert\", \"release_date\": \"1981-05-28\", \"page_count\": 454}\n{\"name\": \"Consider Phlebas\", \"author\": \"Iain M. Banks\", \"release_date\": \"1987-04-23\", \"page_count\": 471}\n{\"name\": \"Pandora's Star\", \"author\": \"Peter F. Hamilton\", \"release_date\": \"2004-03-02\", \"page_count\": 768}\n{\"name\": \"Revelation Space\", \"author\": \"Alastair Reynolds\", \"release_date\": \"2000-03-15\", \"page_count\": 585}\n{\"name\": \"A Fire Upon the Deep\", \"author\": \"Vernor Vinge\", \"release_date\": \"1992-06-01\", \"page_count\": 613}\n{\"name\": \"Ender's Game\", \"author\": \"Orson Scott Card\", \"release_date\": \"1985-06-01\", \"page_count\": 324}\n{\"name\": \"1984\", \"author\": \"George Orwell\", \"release_date\": \"1985-06-01\", \"page_count\": 328}\n{\"name\": \"Fahrenheit 451\", \"author\": \"Ray Bradbury\", \"release_date\": \"1953-10-15\", \"page_count\": 227}\n{\"name\": \"Brave New World\", \"author\": \"Aldous Huxley\", \"release_date\": \"1932-06-01\", \"page_count\": 268}\n{\"name\": \"Foundation\", \"author\": \"Isaac Asimov\", \"release_date\": \"1951-06-01\", \"page_count\": 224}\n{\"name\": \"The Giver\", \"author\": \"Lois Lowry\", \"release_date\": \"1993-04-26\", \"page_count\": 208}\n{\"name\": \"Slaughterhouse-Five\", \"author\": \"Kurt Vonnegut\", \"release_date\": \"1969-06-01\", 
\"page_count\": 275}\n{\"name\": \"The Hitchhiker's Guide to the Galaxy\", \"author\": \"Douglas Adams\", \"release_date\": \"1979-10-12\", \"page_count\": 180}\n{\"name\": \"Snow Crash\", \"author\": \"Neal Stephenson\", \"release_date\": \"1992-06-01\", \"page_count\": 470}\n{\"name\": \"Neuromancer\", \"author\": \"William Gibson\", \"release_date\": \"1984-07-01\", \"page_count\": 271}\n{\"name\": \"The Handmaid's Tale\", \"author\": \"Margaret Atwood\", \"release_date\": \"1985-06-01\", \"page_count\": 311}\n{\"name\": \"Starship Troopers\", \"author\": \"Robert A. Heinlein\", \"release_date\": \"1959-12-01\", \"page_count\": 335}\n{\"name\": \"The Left Hand of Darkness\", \"author\": \"Ursula K. Le Guin\", \"release_date\": \"1969-06-01\", \"page_count\": 304}\n{\"name\": \"The Moon is a Harsh Mistress\", \"author\": \"Robert A. Heinlein\", \"release_date\": \"1966-04-01\", \"page_count\": 288}"

@endpoint GET /_text_structure/test_grok_pattern
@desc Test a Grok pattern
@required {grok_pattern: any # The Grok pattern to run on the text., text: [str] # The lines of text to run the Grok pattern on.}
@optional {ecs_compatibility: str # The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`.}
@returns(200) {matches: [map]}
@example_request "{\n  \"grok_pattern\": \"Hello %{WORD:first_name} %{WORD:last_name}\",\n  \"text\": [\n    \"Hello John Doe\",\n    \"this does not match\"\n  ]\n}"

@endpoint POST /_text_structure/test_grok_pattern
@desc Test a Grok pattern
@required {grok_pattern: any # The Grok pattern to run on the text., text: [str] # The lines of text to run the Grok pattern on.}
@optional {ecs_compatibility: str # The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`.}
@returns(200) {matches: [map]}
@example_request "{\n  \"grok_pattern\": \"Hello %{WORD:first_name} %{WORD:last_name}\",\n  \"text\": [\n    \"Hello John Doe\",\n    \"this does not match\"\n  ]\n}"

@endgroup

@group _transform
@endpoint GET /_transform/{transform_id}
@desc Get transforms
@required {transform_id: any # Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of transforms., size: num # Specifies the maximum number of transforms to obtain., exclude_generated: bool # Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, transforms: [map]}

@endpoint PUT /_transform/{transform_id}
@desc Create a transform
@required {transform_id: str # Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters., dest: any # The destination for the transform., source: any # The source of the data for the transform.}
@optional {defer_validation: bool # When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., description: str # Free text description of the transform., frequency: any=1m # The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`., latest: any # The latest method transforms the data by finding the latest document for each unique key., _meta: any # Defines optional transform metadata., pivot: any # The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data., retention_policy: any # Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index., settings: any # Defines optional transform settings., sync: any # Defines the properties transforms require to run continuously.}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"source\": {\n    \"index\": \"kibana_sample_data_ecommerce\",\n    \"query\": {\n      \"term\": {\n        \"geoip.continent_name\": {\n          \"value\": \"Asia\"\n        }\n      }\n    }\n  },\n  \"pivot\": {\n    \"group_by\": {\n      \"customer_id\": {\n        \"terms\": {\n          \"field\": \"customer_id\",\n          \"missing_bucket\": true\n        }\n      }\n    },\n    \"aggregations\": {\n      \"max_price\": {\n        \"max\": {\n          \"field\": \"taxful_total_price\"\n        }\n      }\n    }\n  },\n  \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n  \"dest\": {\n    \"index\": \"kibana_sample_data_ecommerce_transform1\",\n    \"pipeline\": \"add_timestamp_pipeline\"\n  },\n  \"frequency\": \"5m\",\n  \"sync\": {\n    \"time\": {\n      \"field\": \"order_date\",\n      \"delay\": \"60s\"\n    }\n  },\n  \"retention_policy\": {\n    \"time\": {\n      \"field\": \"order_date\",\n      \"max_age\": \"30d\"\n    }\n  }\n}"

@endpoint DELETE /_transform/{transform_id}
@desc Delete a transform
@required {transform_id: str # Identifier for the transform.}
@optional {force: bool # If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state., delete_dest_index: bool # If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted, timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint GET /_transform/_node_stats
@desc Get node stats
@returns(200) {total: any}

@endpoint GET /_transform
@desc Get transforms
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of transforms., size: num # Specifies the maximum number of transforms to obtain., exclude_generated: bool # Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.}
@returns(200) {count: num, transforms: [map]}

@endpoint GET /_transform/{transform_id}/_stats
@desc Get transform stats
@required {transform_id: any # Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`.}
@optional {allow_no_match: bool # Specifies what to do when the request:  1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches.  If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches., from: num # Skips the specified number of transforms., size: num # Specifies the maximum number of transforms to obtain., timeout: any # Controls the time to wait for the stats}
@returns(200) {count: num, transforms: [map]}

@endpoint GET /_transform/{transform_id}/_preview
@desc Preview a transform
@required {transform_id: str # Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., dest: any # The destination for the transform., description: str # Free text description of the transform., frequency: any=1m # The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h., pivot: any # The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data., source: any # The source of the data for the transform., settings: any # Defines optional transform settings., sync: any # Defines the properties transforms require to run continuously., retention_policy: any # Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index., latest: any # The latest method transforms the data by finding the latest document for each unique key.}
@returns(200) {generated_dest_index: any, preview: [map]}
@example_request "{\n  \"source\": {\n    \"index\": \"kibana_sample_data_ecommerce\"\n  },\n  \"pivot\": {\n    \"group_by\": {\n      \"customer_id\": {\n        \"terms\": {\n          \"field\": \"customer_id\",\n          \"missing_bucket\": true\n        }\n      }\n    },\n    \"aggregations\": {\n      \"max_price\": {\n        \"max\": {\n          \"field\": \"taxful_total_price\"\n        }\n      }\n    }\n  }\n}"

@endpoint POST /_transform/{transform_id}/_preview
@desc Preview a transform
@required {transform_id: str # Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., dest: any # The destination for the transform., description: str # Free text description of the transform., frequency: any=1m # The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h., pivot: any # The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data., source: any # The source of the data for the transform., settings: any # Defines optional transform settings., sync: any # Defines the properties transforms require to run continuously., retention_policy: any # Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index., latest: any # The latest method transforms the data by finding the latest document for each unique key.}
@returns(200) {generated_dest_index: any, preview: [map]}
@example_request "{\n  \"source\": {\n    \"index\": \"kibana_sample_data_ecommerce\"\n  },\n  \"pivot\": {\n    \"group_by\": {\n      \"customer_id\": {\n        \"terms\": {\n          \"field\": \"customer_id\",\n          \"missing_bucket\": true\n        }\n      }\n    },\n    \"aggregations\": {\n      \"max_price\": {\n        \"max\": {\n          \"field\": \"taxful_total_price\"\n        }\n      }\n    }\n  }\n}"

@endpoint GET /_transform/_preview
@desc Preview a transform
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., dest: any # The destination for the transform., description: str # Free text description of the transform., frequency: any=1m # The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h., pivot: any # The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data., source: any # The source of the data for the transform., settings: any # Defines optional transform settings., sync: any # Defines the properties transforms require to run continuously., retention_policy: any # Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index., latest: any # The latest method transforms the data by finding the latest document for each unique key.}
@returns(200) {generated_dest_index: any, preview: [map]}
@example_request "{\n  \"source\": {\n    \"index\": \"kibana_sample_data_ecommerce\"\n  },\n  \"pivot\": {\n    \"group_by\": {\n      \"customer_id\": {\n        \"terms\": {\n          \"field\": \"customer_id\",\n          \"missing_bucket\": true\n        }\n      }\n    },\n    \"aggregations\": {\n      \"max_price\": {\n        \"max\": {\n          \"field\": \"taxful_total_price\"\n        }\n      }\n    }\n  }\n}"

@endpoint POST /_transform/_preview
@desc Preview a transform
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., dest: any # The destination for the transform., description: str # Free text description of the transform., frequency: any=1m # The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h., pivot: any # The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data., source: any # The source of the data for the transform., settings: any # Defines optional transform settings., sync: any # Defines the properties transforms require to run continuously., retention_policy: any # Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index., latest: any # The latest method transforms the data by finding the latest document for each unique key.}
@returns(200) {generated_dest_index: any, preview: [map]}
@example_request "{\n  \"source\": {\n    \"index\": \"kibana_sample_data_ecommerce\"\n  },\n  \"pivot\": {\n    \"group_by\": {\n      \"customer_id\": {\n        \"terms\": {\n          \"field\": \"customer_id\",\n          \"missing_bucket\": true\n        }\n      }\n    },\n    \"aggregations\": {\n      \"max_price\": {\n        \"max\": {\n          \"field\": \"taxful_total_price\"\n        }\n      }\n    }\n  }\n}"

@endpoint POST /_transform/{transform_id}/_reset
@desc Reset a transform
@required {transform_id: str # Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters.}
@optional {force: bool # If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {acknowledged: bool}

@endpoint POST /_transform/{transform_id}/_schedule_now
@desc Schedule a transform to start now
@required {transform_id: str # Identifier for the transform.}
@optional {timeout: any # Controls the time to wait for the scheduling to take place.}
@returns(200) {acknowledged: bool}

@endpoint POST /_transform/set_upgrade_mode
@desc Set upgrade_mode for transform indices
@optional {enabled: bool # When `true`, it enables `upgrade_mode` which temporarily halts all transform tasks and prohibits new transform tasks from starting., timeout: any # The time to wait for the request to be completed.}
@returns(200) {acknowledged: bool}

@endpoint POST /_transform/{transform_id}/_start
@desc Start a transform
@required {transform_id: str # Identifier for the transform.}
@optional {timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., from: str # Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms.}
@returns(200) {acknowledged: bool}

@endpoint POST /_transform/{transform_id}/_stop
@desc Stop transforms
@required {transform_id: str # Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier.}
@optional {allow_no_match: bool # Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.  If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms.  If it is false, the request returns a 404 status code when there are no matches or only partial matches., force: bool # If it is true, the API forcefully stops the transforms., timeout: any # Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state., wait_for_checkpoint: bool # If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible., wait_for_completion: bool # If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background.}
@returns(200) {acknowledged: bool}

@endpoint POST /_transform/{transform_id}/_update
@desc Update a transform
@required {transform_id: str # Identifier for the transform.}
@optional {defer_validation: bool # When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., dest: any # The destination for the transform., description: str # Free text description of the transform., frequency: any=1m # The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h., _meta: any # Defines optional transform metadata., source: any # The source of the data for the transform., settings: any # Defines optional transform settings., sync: any # Defines the properties transforms require to run continuously., retention_policy: any # Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index.}
@returns(200) {authorization: any, create_time: num, description: str, dest: any, frequency: any, id: any, latest: any, pivot: any, retention_policy: any, settings: any, source: any, sync: any, version: any, _meta: any}
@example_request "{\n  \"source\": {\n    \"index\": \"kibana_sample_data_ecommerce\",\n    \"query\": {\n      \"term\": {\n        \"geoip.continent_name\": {\n          \"value\": \"Asia\"\n        }\n      }\n    }\n  },\n  \"description\": \"Maximum priced ecommerce data by customer_id in Asia\",\n  \"dest\": {\n    \"index\": \"kibana_sample_data_ecommerce_transform_v2\",\n    \"pipeline\": \"add_timestamp_pipeline\"\n  },\n  \"frequency\": \"15m\",\n  \"sync\": {\n    \"time\": {\n      \"field\": \"order_date\",\n      \"delay\": \"120s\"\n    }\n  }\n}"

@endpoint POST /_transform/_upgrade
@desc Upgrade all transforms
@optional {dry_run: bool # When true, the request checks for updates but does not run them., timeout: any # Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {needs_update: num, no_action: num, updated: num}

@endgroup

@group {index}
@endpoint POST /{index}/_update/{id}
@desc Update a document
@required {index: str # The name of the target index. By default, the index is created automatically if it doesn't exist., id: str # A unique identifier for the document to be updated.}
@optional {if_primary_term: num # Only perform the operation if the document has this primary term., if_seq_no: num # Only perform the operation if the document has this sequence number., include_source_on_error: bool # True or false if to include the document source in the error message in case of parsing errors., lang: str # The script language., refresh: str # If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes., require_alias: bool # If `true`, the destination must be an index alias., retry_on_conflict: num # The number of times the operation should be retried when a conflict occurs., routing: any # A custom value used to route operations to a specific shard., timeout: any # The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur., wait_for_active_shards: any # The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active., _source: any # If `false`, source retrieval is turned off. You can also specify a comma-separated list of the fields you want to retrieve., _source_excludes: any # The source fields you want to exclude., _source_includes: any # The source fields you want to retrieve., detect_noop: bool=true # If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document., doc: map # A partial update to an existing document. 
If both `doc` and `script` are specified, `doc` is ignored., doc_as_upsert: bool=false # If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported., script: any # The script to run to update the document., scripted_upsert: bool=false # If `true`, run the script whether or not the document exists., _source: any=true # If `false`, turn off source retrieval. You can also specify a comma-separated list of the fields you want to retrieve., upsert: map # If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.}
@returns(200)
@example_request "{\n  \"script\" : {\n    \"source\": \"ctx._source.counter += params.count\",\n    \"lang\": \"painless\",\n    \"params\" : {\n      \"count\" : 4\n    }\n  }\n}"

@endpoint POST /{index}/_update_by_query
@desc Update documents
@required {index: any # A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.}
@optional {allow_no_indices: bool # A setting that does two separate checks on the index expression. If `false`, the request returns an error (1) if any wildcard expression (including `_all` and `*`) resolves to zero matching indices or (2) if the complete set of resolved indices, aliases or data streams is empty after all expressions are evaluated. If `true`, index expressions that resolve to no indices are allowed and the request returns an empty result., analyzer: str # The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified., analyze_wildcard: bool # If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified., conflicts: str # The preferred behavior when update by query hits version conflicts: `abort` or `proceed`., default_operator: str # The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified., df: str # The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified., expand_wildcards: any # The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`., from: num # Skips the specified number of documents., ignore_unavailable: bool # If `false`, the request returns an error if it targets a concrete (non-wildcarded) index, alias, or data stream that is missing, closed, or otherwise unavailable. If `true`, unavailable concrete targets are silently ignored., lenient: bool # If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
This parameter can be used only when the `q` query string parameter is specified., max_docs: num # The maximum number of documents to process. It defaults to all documents. When set to a value less than or equal to `scroll_size` and `conflicts` is set to `abort`, a scroll will not be used to retrieve the results for the operation., pipeline: str # The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter., preference: str # The node or shard the operation should be performed on. It is random by default., q: str # A query in the Lucene query string syntax., refresh: bool # If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed., request_cache: bool # If `true`, the request cache is used for this request. It defaults to the index-level setting., requests_per_second: num # The throttle for this request in sub-requests per second., routing: any # A custom value used to route operations to a specific shard., scroll: any # The period to retain the search context for scrolling., scroll_size: num # The size of the scroll request that powers the operation., search_timeout: any # An explicit timeout for each search request. By default, there is no timeout., search_type: str # The type of the search operation. 
Available options include `query_then_fetch` and `dfs_query_then_fetch`., slices: any # The number of slices this task should be divided into., sort: [str] # A comma-separated list of `field:direction` pairs., stats: [str] # The specific `tag` of the request for logging and statistical purposes., terminate_after: num # The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting.  IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers., timeout: any # The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur., version: bool # If `true`, returns the document version as part of a hit., version_type: bool # Should the document increment the version number (internal) on hit or not (reindex)., wait_for_active_shards: any # The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API., wait_for_completion: bool # If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. 
Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`., max_docs: num # The maximum number of documents to update., query: any # The documents to update using the Query DSL., script: any # The script to run to update the document source or metadata when updating., slice: any # Slice the request manually using the provided slice ID and total number of slices., conflicts: any=abort # The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.}
@returns(200) {batches: num, failures: [map], noops: num, deleted: num, requests_per_second: num, retries: any, slices: [map], task: any, timed_out: bool, took: any, total: num, updated: num, version_conflicts: num, throttled: any, throttled_millis: any, throttled_until: any, throttled_until_millis: any}
@example_request "{\n  \"query\": { \n    \"term\": {\n      \"user.id\": \"kimchy\"\n    }\n  }\n}"

@endgroup

@group _update_by_query
@endpoint POST /_update_by_query/{task_id}/_rethrottle
@desc Throttle an update by query operation
@required {task_id: str # The ID for the task., requests_per_second: num # The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`.}
@returns(200) {nodes: map}

@endgroup

@group _watcher
@endpoint PUT /_watcher/watch/{watch_id}/_ack
@desc Acknowledge a watch
@required {watch_id: str # The watch identifier.}
@returns(200) {status: any}

@endpoint POST /_watcher/watch/{watch_id}/_ack
@desc Acknowledge a watch
@required {watch_id: str # The watch identifier.}
@returns(200) {status: any}

@endpoint PUT /_watcher/watch/{watch_id}/_ack/{action_id}
@desc Acknowledge a watch
@required {watch_id: str # The watch identifier., action_id: any # A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged.}
@returns(200) {status: any}

@endpoint POST /_watcher/watch/{watch_id}/_ack/{action_id}
@desc Acknowledge a watch
@required {watch_id: str # The watch identifier., action_id: any # A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged.}
@returns(200) {status: any}

@endpoint PUT /_watcher/watch/{watch_id}/_activate
@desc Activate a watch
@required {watch_id: str # The watch identifier.}
@returns(200) {status: any}

@endpoint POST /_watcher/watch/{watch_id}/_activate
@desc Activate a watch
@required {watch_id: str # The watch identifier.}
@returns(200) {status: any}

@endpoint PUT /_watcher/watch/{watch_id}/_deactivate
@desc Deactivate a watch
@required {watch_id: str # The watch identifier.}
@returns(200) {status: any}

@endpoint POST /_watcher/watch/{watch_id}/_deactivate
@desc Deactivate a watch
@required {watch_id: str # The watch identifier.}
@returns(200) {status: any}

@endpoint GET /_watcher/watch/{id}
@desc Get a watch
@required {id: str # The watch identifier.}
@returns(200) {found: bool, _id: any, status: any, watch: any, _primary_term: num, _seq_no: any, _version: any}

@endpoint PUT /_watcher/watch/{id}
@desc Create or update a watch
@required {id: str # The identifier for the watch.}
@optional {active: bool # The initial state of the watch. The default value is `true`, which means the watch is active by default., if_primary_term: num # Only update the watch if the last operation that has changed the watch has the specified primary term, if_seq_no: num # Only update the watch if the last operation that has changed the watch has the specified sequence number, version: num # Explicit version number for concurrency control, actions: map # The list of actions that will be run if the condition matches., condition: any # The condition that defines if the actions should be run., input: any # The input that defines the input that loads the data for the watch., metadata: any # Metadata JSON that will be copied into the history entries., throttle_period: any # The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request., throttle_period_in_millis: any # Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request., transform: any # The transform that processes the watch payload to prepare it for the watch actions., trigger: any # The trigger that defines when the watch should run.}
@returns(200) {created: bool, _id: any, _primary_term: num, _seq_no: any, _version: any}
@example_request "{\n  \"trigger\" : {\n    \"schedule\" : { \"cron\" : \"0 0/1 * * * ?\" }\n  },\n  \"input\" : {\n    \"search\" : {\n      \"request\" : {\n        \"indices\" : [\n          \"logstash*\"\n        ],\n        \"body\" : {\n          \"query\" : {\n            \"bool\" : {\n              \"must\" : {\n                \"match\": {\n                  \"response\": 404\n                }\n              },\n              \"filter\" : {\n                \"range\": {\n                  \"@timestamp\": {\n                    \"from\": \"{{ctx.trigger.scheduled_time}}||-5m\",\n                    \"to\": \"{{ctx.trigger.triggered_time}}\"\n                  }\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n  },\n  \"condition\" : {\n    \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n  },\n  \"actions\" : {\n    \"email_admin\" : {\n      \"email\" : {\n        \"to\" : \"admin@domain.host.com\",\n        \"subject\" : \"404 recently encountered\"\n      }\n    }\n  }\n}"

@endpoint POST /_watcher/watch/{id}
@desc Create or update a watch
@required {id: str # The identifier for the watch.}
@optional {active: bool # The initial state of the watch. The default value is `true`, which means the watch is active by default., if_primary_term: num # Only update the watch if the last operation that has changed the watch has the specified primary term, if_seq_no: num # Only update the watch if the last operation that has changed the watch has the specified sequence number, version: num # Explicit version number for concurrency control, actions: map # The list of actions that will be run if the condition matches., condition: any # The condition that defines if the actions should be run., input: any # The input that defines the input that loads the data for the watch., metadata: any # Metadata JSON that will be copied into the history entries., throttle_period: any # The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request., throttle_period_in_millis: any # Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request., transform: any # The transform that processes the watch payload to prepare it for the watch actions., trigger: any # The trigger that defines when the watch should run.}
@returns(200) {created: bool, _id: any, _primary_term: num, _seq_no: any, _version: any}
@example_request "{\n  \"trigger\" : {\n    \"schedule\" : { \"cron\" : \"0 0/1 * * * ?\" }\n  },\n  \"input\" : {\n    \"search\" : {\n      \"request\" : {\n        \"indices\" : [\n          \"logstash*\"\n        ],\n        \"body\" : {\n          \"query\" : {\n            \"bool\" : {\n              \"must\" : {\n                \"match\": {\n                  \"response\": 404\n                }\n              },\n              \"filter\" : {\n                \"range\": {\n                  \"@timestamp\": {\n                    \"from\": \"{{ctx.trigger.scheduled_time}}||-5m\",\n                    \"to\": \"{{ctx.trigger.triggered_time}}\"\n                  }\n                }\n              }\n            }\n          }\n        }\n      }\n    }\n  },\n  \"condition\" : {\n    \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n  },\n  \"actions\" : {\n    \"email_admin\" : {\n      \"email\" : {\n        \"to\" : \"admin@domain.host.com\",\n        \"subject\" : \"404 recently encountered\"\n      }\n    }\n  }\n}"

@endpoint DELETE /_watcher/watch/{id}
@desc Delete a watch
@required {id: str # The watch identifier.}
@returns(200) {found: bool, _id: any, _version: any}

@endpoint PUT /_watcher/watch/{id}/_execute
@desc Run a watch
@required {id: str # The watch identifier.}
@optional {debug: bool # Defines whether the watch runs in debug mode., action_modes: map # Determines how to handle the watch actions as part of the watch execution., alternative_input: map # When present, the watch uses this object as a payload instead of executing its own input., ignore_condition: bool=false # When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter., record_execution: bool=false # When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter., simulated_actions: any, trigger_data: any # This structure is parsed as the data of the trigger event that will be used during the watch execution., watch: any=null # When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set.}
@returns(200) {_id: any, watch_record: any}
@example_request "{\n  \"trigger_data\" : { \n    \"triggered_time\" : \"now\",\n    \"scheduled_time\" : \"now\"\n  },\n  \"alternative_input\" : { \n    \"foo\" : \"bar\"\n  },\n  \"ignore_condition\" : true, \n  \"action_modes\" : {\n    \"my-action\" : \"force_simulate\" \n  },\n  \"record_execution\" : true \n}"

@endpoint POST /_watcher/watch/{id}/_execute
@desc Run a watch
@required {id: str # The watch identifier.}
@optional {debug: bool # Defines whether the watch runs in debug mode., action_modes: map # Determines how to handle the watch actions as part of the watch execution., alternative_input: map # When present, the watch uses this object as a payload instead of executing its own input., ignore_condition: bool=false # When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter., record_execution: bool=false # When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter., simulated_actions: any, trigger_data: any # This structure is parsed as the data of the trigger event that will be used during the watch execution., watch: any=null # When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set.}
@returns(200) {_id: any, watch_record: any}
@example_request "{\n  \"trigger_data\" : { \n    \"triggered_time\" : \"now\",\n    \"scheduled_time\" : \"now\"\n  },\n  \"alternative_input\" : { \n    \"foo\" : \"bar\"\n  },\n  \"ignore_condition\" : true, \n  \"action_modes\" : {\n    \"my-action\" : \"force_simulate\" \n  },\n  \"record_execution\" : true \n}"

@endpoint PUT /_watcher/watch/_execute
@desc Run a watch
@optional {debug: bool # Defines whether the watch runs in debug mode., action_modes: map # Determines how to handle the watch actions as part of the watch execution., alternative_input: map # When present, the watch uses this object as a payload instead of executing its own input., ignore_condition: bool=false # When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter., record_execution: bool=false # When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter., simulated_actions: any, trigger_data: any # This structure is parsed as the data of the trigger event that will be used during the watch execution., watch: any=null # When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set.}
@returns(200) {_id: any, watch_record: any}
@example_request "{\n  \"trigger_data\" : { \n    \"triggered_time\" : \"now\",\n    \"scheduled_time\" : \"now\"\n  },\n  \"alternative_input\" : { \n    \"foo\" : \"bar\"\n  },\n  \"ignore_condition\" : true, \n  \"action_modes\" : {\n    \"my-action\" : \"force_simulate\" \n  },\n  \"record_execution\" : true \n}"

@endpoint POST /_watcher/watch/_execute
@desc Run a watch
@optional {debug: bool # Defines whether the watch runs in debug mode., action_modes: map # Determines how to handle the watch actions as part of the watch execution., alternative_input: map # When present, the watch uses this object as a payload instead of executing its own input., ignore_condition: bool=false # When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter., record_execution: bool=false # When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter., simulated_actions: any, trigger_data: any # This structure is parsed as the data of the trigger event that will be used during the watch execution., watch: any=null # When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set.}
@returns(200) {_id: any, watch_record: any}
@example_request "{\n  \"trigger_data\" : { \n    \"triggered_time\" : \"now\",\n    \"scheduled_time\" : \"now\"\n  },\n  \"alternative_input\" : { \n    \"foo\" : \"bar\"\n  },\n  \"ignore_condition\" : true, \n  \"action_modes\" : {\n    \"my-action\" : \"force_simulate\" \n  },\n  \"record_execution\" : true \n}"

@endpoint GET /_watcher/settings
@desc Get Watcher index settings
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.}
@returns(200) {index: any}

@endpoint PUT /_watcher/settings
@desc Update Watcher index settings
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error., timeout: any # The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error., index.auto_expand_replicas: str, index.number_of_replicas: num}
@returns(200) {acknowledged: bool}
@example_request "{\n  \"index.auto_expand_replicas\": \"0-4\"\n}"

@endpoint GET /_watcher/_query/watches
@desc Query watches
@optional {from: num=0 # The offset from the first result to fetch. It must be non-negative., size: num=10 # The number of hits to return. It must be non-negative., query: any # A query that filters the watches to be returned., sort: any # One or more fields used to sort the search results., search_after: any # Retrieve the next page of hits using a set of sort values from the previous page.}
@returns(200) {count: num, watches: [map]}

@endpoint POST /_watcher/_query/watches
@desc Query watches
@optional {from: num=0 # The offset from the first result to fetch. It must be non-negative., size: num=10 # The number of hits to return. It must be non-negative., query: any # A query that filters the watches to be returned., sort: any # One or more fields used to sort the search results., search_after: any # Retrieve the next page of hits using a set of sort values from the previous page.}
@returns(200) {count: num, watches: [map]}

@endpoint POST /_watcher/_start
@desc Start the watch service
@optional {master_timeout: any # Period to wait for a connection to the master node.}
@returns(200) {acknowledged: bool}

@endpoint GET /_watcher/stats
@desc Get Watcher statistics
@optional {emit_stacktraces: bool # Defines whether stack traces are generated for each watch that is running., metric: any # Defines which additional metrics are included in the response.}
@returns(200) {_nodes: any, cluster_name: any, manually_stopped: bool, stats: [map]}

@endpoint GET /_watcher/stats/{metric}
@desc Get Watcher statistics
@required {metric: any # Defines which additional metrics are included in the response.}
@optional {emit_stacktraces: bool # Defines whether stack traces are generated for each watch that is running., metric: any # Defines which additional metrics are included in the response.}
@returns(200) {_nodes: any, cluster_name: any, manually_stopped: bool, stats: [map]}

@endpoint POST /_watcher/_stop
@desc Stop the watch service
@optional {master_timeout: any # The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {acknowledged: bool}

@endgroup

@group _xpack
@endpoint GET /_xpack
@desc Get information
@optional {categories: [str] # A comma-separated list of the information categories to include in the response. For example, `build,license,features`., accept_enterprise: bool # If this parameter is used, it must be set to true; otherwise it is ignored, human: bool # Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line.}
@returns(200) {build: any, features: any, license: any, tagline: str}

@endpoint GET /_xpack/usage
@desc Get usage information
@optional {master_timeout: any # The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.}
@returns(200) {aggregate_metric: any, analytics: any, archive: any, watcher: any, ccr: any, data_frame: any, data_science: any, data_streams: any, data_tiers: any, enrich: any, eql: any, flattened: any, graph: any, gpu_vector_indexing: any, health_api: any, ilm: any, logstash: any, ml: any, monitoring: any, rollup: any, runtime_fields: any, spatial: any, searchable_snapshots: any, security: any, slm: any, sql: any, transform: any, vectors: any, voting_only: any}

@endgroup

@end
