@lap v0.3
# Machine-readable API spec. Each @endpoint block is one API call.
@api Qdrant API
@base http://localhost:6333
@version master
@auth ApiKey api-key in header | Bearer bearer
@endpoints 73
@hint download_for_search
@toc collections(56), root(1), telemetry(1), metrics(1), healthz(1), livez(1), readyz(1), issues(2), cluster(4), aliases(1), snapshots(4)

@group collections
@endpoint PUT /collections/{collection_name}/shards
@desc Create shard key
@required {collection_name: str # Name of the collection to create shards for, shard_key: any}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error., shards_number: int(uint32) # How many shards to create for this key If not specified, will use the default value from config, replication_factor: int(uint32) # How many replicas to create for each shard If not specified, will use the default value from config, placement: [int(uint64)] # Placement of shards for this key List of peer ids, that can be used to place shards for this key If not specified, will be randomly placed among all peers, initial_state: any # Initial state of the shards for this key If not specified, will be `Initializing` first and then `Active` Warning: do not change this unless you know what you are doing}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/shards
@desc List shard keys
@required {collection_name: str # Name of the collection to list shard keys for}
@returns(200) {usage: any, time: num(float), status: str, result: map{shard_keys: [map]?}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/shards/delete
@desc Delete shard key
@required {collection_name: str # Name of the collection to delete shards for, shard_key: any}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endgroup

@group root
@endpoint GET /
@desc Returns information about the running Qdrant instance
@returns(200) {title: str, version: str, commit: str?} # Qdrant server version information
@errors {4XX: error}

@endgroup

@group telemetry
@endpoint GET /telemetry
@desc Collect telemetry data
@optional {anonymize: bool # If true, anonymize result, details_level: int # Level of details in telemetry data. Minimal level is 0, maximal is infinity, per_collection: bool # If true, include per-collection request statistics in the response, timeout: int=60 # Timeout for this request}
@returns(200) {usage: any, time: num(float), status: str, result: map{id: str, app: any, collections: map{number_of_collections: int(uint), max_collections: int(uint)?, collections: [any]?, snapshots: [map]?}, cluster: any, requests: any, memory: any, hardware: any}} # successful operation
@errors {4XX: error}

@endgroup

@group metrics
@endpoint GET /metrics
@desc Collect Prometheus metrics data
@optional {anonymize: bool # If true, anonymize result, per_collection: bool # If true, include per-collection request metrics with a collection label instead of global request metrics, timeout: int=60 # Timeout for this request}
@returns(200) Metrics data in Prometheus format
@errors {4XX: error}

@endgroup

@group healthz
@endpoint GET /healthz
@desc Kubernetes healthz endpoint
@returns(200) Healthz response
@errors {4XX: error}

@endgroup

@group livez
@endpoint GET /livez
@desc Kubernetes livez endpoint
@returns(200) Healthz response
@errors {4XX: error}

@endgroup

@group readyz
@endpoint GET /readyz
@desc Kubernetes readyz endpoint
@returns(200) Healthz response
@errors {4XX: error}

@endgroup

@group issues
@endpoint GET /issues
@desc Get issues
@returns(200) Successful response
@errors {4XX: error}

@endpoint DELETE /issues
@desc Clear issues
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endgroup

@group cluster
@endpoint GET /cluster
@desc Get cluster status info
@returns(200) {usage: any, time: num(float), status: str, result: any} # successful operation
@errors {4XX: error}

@endpoint GET /cluster/telemetry
@desc Collect cluster telemetry data
@optional {details_level: int # The level of detail to include in the response, timeout: int=60 # Timeout for this request}
@returns(200) {usage: any, time: num(float), status: str, result: map{collections: map, cluster: any}} # successful operation
@errors {4XX: error}

@endpoint POST /cluster/recover
@desc Tries to recover current peer Raft state.
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint DELETE /cluster/peer/{peer_id}
@desc Remove peer from the cluster
@required {peer_id: int # Id of the peer}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error., force: bool=false # If true - removes peer even if it has shards/replicas on it.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endgroup

@group collections
@endpoint GET /collections
@desc List collections
@returns(200) {usage: any, time: num(float), status: str, result: map{collections: [map]}} # successful operation
@errors {4XX: error}

@endpoint GET /collections/{collection_name}
@desc Collection info
@required {collection_name: str # Name of the collection to retrieve}
@returns(200) {usage: any, time: num(float), status: str, result: map{status: str, optimizer_status: any, warnings: [map], indexed_vectors_count: int(uint)?, points_count: int(uint)?, segments_count: int(uint), config: map{params: map{vectors: any, shard_number: int(uint32), sharding_method: any, replication_factor: int(uint32), write_consistency_factor: int(uint32), read_fan_out_factor: int(uint32)?, read_fan_out_delay_ms: int(uint64)?, on_disk_payload: bool, sparse_vectors: map?}, hnsw_config: map{m: int(uint), ef_construct: int(uint), full_scan_threshold: int(uint), max_indexing_threads: int(uint), on_disk: bool?, payload_m: int(uint)?, inline_storage: bool?}, optimizer_config: map{deleted_threshold: num(double), vacuum_min_vector_number: int(uint), default_segment_number: int(uint), max_segment_size: int(uint)?, memmap_threshold: int(uint)?, indexing_threshold: int(uint)?, flush_interval_sec: int(uint64), max_optimization_threads: int(uint)?, prevent_unoptimized: bool?}, wal_config: any, quantization_config: any, strict_mode_config: any, metadata: any}, payload_schema: map, update_queue: any}} # successful operation
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}
@desc Create collection
@required {collection_name: str # Name of the new collection}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error., vectors: any # Vector params separator for single and multiple vector modes Single mode:  { "size": 128, "distance": "Cosine" }  or multiple mode:  { "default": { "size": 128, "distance": "Cosine" } }, shard_number: int(uint32)=null # For auto sharding: Number of shards in collection. - Default is 1 for standalone, otherwise equal to the number of nodes - Minimum is 1  For custom sharding: Number of shards in collection per shard group. - Default is 1, meaning that each shard key will be mapped to a single shard - Minimum is 1, sharding_method: any=null # Sharding method Default is Auto - points are distributed across all available shards Custom - points are distributed across shards according to shard key, replication_factor: int(uint32)=null # Number of shards replicas. Default is 1 Minimum is 1, write_consistency_factor: int(uint32)=null # Defines how many replicas should apply the operation for us to consider it successful. Increasing this number will make the collection more resilient to inconsistencies, but will also make it fail if not enough replicas are available. Does not have any performance impact., on_disk_payload: bool=null # If true - point's payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM.  Default: true, hnsw_config: any # Custom params for HNSW index. If none - values from service configuration file are used., wal_config: any # Custom params for WAL. If none - values from service configuration file are used., optimizers_config: any # Custom params for Optimizers.  If none - values from service configuration file are used., quantization_config: any=null # Quantization parameters. If none - quantization is disabled., sparse_vectors: map # Sparse vector data config., strict_mode_config: any # Strict-mode config., metadata: any # Arbitrary JSON metadata for the collection This can be used to store application-specific information such as creation time, migration data, inference model info, etc.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint PATCH /collections/{collection_name}
@desc Update collection parameters
@required {collection_name: str # Name of the collection to update}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error., vectors: any # Map of vector data parameters to update for each named vector. To update parameters in a collection having a single unnamed vector, use an empty string as name., optimizers_config: any # Custom params for Optimizers.  If none - it is left unchanged. This operation is blocking, it will only proceed once all current optimizations are complete, params: any # Collection base params. If none - it is left unchanged., hnsw_config: any # HNSW parameters to update for the collection index. If none - it is left unchanged., quantization_config: any=null # Quantization parameters to update. If none - it is left unchanged., sparse_vectors: any # Map of sparse vector data parameters to update for each sparse vector., strict_mode_config: any, metadata: any # Metadata to update for the collection. If provided, this will merge with existing metadata. To remove metadata, set it to an empty object.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint DELETE /collections/{collection_name}
@desc Delete collection
@required {collection_name: str # Name of the collection to delete}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint POST /collections/aliases
@desc Update aliases of the collections
@required {actions: [any]}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}/index
@desc Create index for field in collection
@required {collection_name: str # Name of the collection, field_name: str}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation, field_schema: any}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/exists
@desc Check the existence of a collection
@required {collection_name: str # Name of the collection}
@returns(200) {usage: any, time: num(float), status: str, result: map{exists: bool}} # successful operation
@errors {4XX: error}

@endpoint DELETE /collections/{collection_name}/index/{field_name}
@desc Delete index for field in collection
@required {collection_name: str # Name of the collection, field_name: str # Name of the field where to delete the index}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/cluster
@desc Collection cluster info
@required {collection_name: str # Name of the collection to retrieve the cluster info for}
@returns(200) {usage: any, time: num(float), status: str, result: map{peer_id: int(uint64), shard_count: int(uint), local_shards: [map], remote_shards: [map], shard_transfers: [map], resharding_operations: [map]?}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/cluster
@desc Update collection cluster setup
@required {collection_name: str # Name of the collection on which to apply the cluster update operation}
@optional {timeout: int # Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error.}
@returns(200) {usage: any, time: num(float), status: str, result: bool} # successful operation
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/optimizations
@desc Get optimization progress
@required {collection_name: str # Name of the collection}
@optional {with: str # Comma-separated list of optional fields to include in the response. Possible values: queued, completed, idle_segments., completed_limit: int=16 # Maximum number of completed optimizations to return. Ignored if `completed` is not in the `with` parameter.}
@returns(200) {usage: any, time: num(float), status: str, result: map{summary: map{queued_optimizations: int(uint), queued_segments: int(uint), queued_points: int(uint), idle_segments: int(uint)}, running: [map], queued: [map]?, completed: [map]?, idle_segments: [map]?}} # successful operation
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/aliases
@desc List aliases for collection
@required {collection_name: str # Name of the collection}
@returns(200) {usage: any, time: num(float), status: str, result: map{aliases: [map]}} # successful operation
@errors {4XX: error}

@endgroup

@group aliases
@endpoint GET /aliases
@desc List collections aliases
@returns(200) {usage: any, time: num(float), status: str, result: map{aliases: [map]}} # successful operation
@errors {4XX: error}

@endgroup

@group collections
@endpoint POST /collections/{collection_name}/snapshots/upload
@desc Recover from an uploaded snapshot
@required {collection_name: str # Name of the collection}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true., priority: str # Defines source of truth for snapshot recovery, checksum: str # Optional SHA256 checksum to verify snapshot integrity before recovery.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}/snapshots/recover
@desc Recover from a snapshot
@required {collection_name: str # Name of the collection, location: str(uri) # Examples: - URL `http://localhost:8080/collections/my_collection/snapshots/my_snapshot` - Local path `file:///qdrant/snapshots/test_collection-2022-08-04-10-49-10.snapshot`}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true., priority: any=null # Defines which data should be used as a source of truth if there are other replicas in the cluster. If set to `Snapshot`, the snapshot will be used as a source of truth, and the current state will be overwritten. If set to `Replica`, the current state will be used as a source of truth, and after recovery it will be synchronized with the snapshot., checksum: str=null # Optional SHA256 checksum to verify snapshot integrity before recovery., api_key: str=null # Optional API key used when fetching the snapshot from a remote URL.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/snapshots
@desc List collection snapshots
@required {collection_name: str # Name of the collection}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/snapshots
@desc Create collection snapshot
@required {collection_name: str # Name of the collection for which to create a snapshot}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true.}
@returns(200) {time: num(float), status: str, result: map{name: str, creation_time: str(partial-date-time)?, size: int(uint64), checksum: str?}} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint DELETE /collections/{collection_name}/snapshots/{snapshot_name}
@desc Delete collection snapshot
@required {collection_name: str # Name of the collection for which to delete a snapshot, snapshot_name: str # Name of the snapshot to delete}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/snapshots/{snapshot_name}
@desc Download collection snapshot
@required {collection_name: str # Name of the collection, snapshot_name: str # Name of the snapshot to download}
@returns(200) Snapshot file
@errors {4XX: error}

@endgroup

@group snapshots
@endpoint GET /snapshots
@desc List of storage snapshots
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /snapshots
@desc Create storage snapshot
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true.}
@returns(200) {time: num(float), status: str, result: map{name: str, creation_time: str(partial-date-time)?, size: int(uint64), checksum: str?}} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint DELETE /snapshots/{snapshot_name}
@desc Delete storage snapshot
@required {snapshot_name: str # Name of the full snapshot to delete}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint GET /snapshots/{snapshot_name}
@desc Download storage snapshot
@required {snapshot_name: str # Name of the snapshot to download}
@returns(200) Snapshot file
@errors {4XX: error}

@endgroup

@group collections
@endpoint GET /collections/{collection_name}/shards/{shard_id}/snapshot
@desc Download shard snapshot
@required {collection_name: str # Name of the collection, shard_id: int # Id of the shard}
@returns(200) Snapshot file
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/shards/{shard_id}/snapshots/upload
@desc Recover shard from an uploaded snapshot
@required {collection_name: str # Name of the collection, shard_id: int # Id of the shard to recover}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true., priority: str # Defines source of truth for snapshot recovery, checksum: str # Optional SHA256 checksum to verify snapshot integrity before recovery.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}/shards/{shard_id}/snapshots/recover
@desc Recover shard from a snapshot
@required {collection_name: str # Name of the collection, shard_id: int # Id of the shard to recover, location: any}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true., priority: any=null, checksum: str=null # Optional SHA256 checksum to verify snapshot integrity before recovery., api_key: str=null # Optional API key used when fetching the snapshot from a remote URL.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/shards/{shard_id}/snapshots
@desc List shards snapshots for a collection
@required {collection_name: str # Name of the collection, shard_id: int # Id of the shard}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/shards/{shard_id}/snapshots
@desc Create shard snapshot
@required {collection_name: str # Name of the collection for which to create a snapshot, shard_id: int # Id of the shard}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true.}
@returns(200) {time: num(float), status: str, result: map{name: str, creation_time: str(partial-date-time)?, size: int(uint64), checksum: str?}} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint DELETE /collections/{collection_name}/shards/{shard_id}/snapshots/{snapshot_name}
@desc Delete shard snapshot
@required {collection_name: str # Name of the collection for which to delete a snapshot, shard_id: int # Id of the shard, snapshot_name: str # Name of the snapshot to delete}
@optional {wait: bool # If true, wait for changes to actually happen. If false - let changes happen in background. Default is true.}
@returns(200) {time: num(float), status: str, result: bool} # successful operation
@returns(202) {time: num(float), status: str} # operation is accepted
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/shards/{shard_id}/snapshots/{snapshot_name}
@desc Download shard snapshot
@required {collection_name: str # Name of the collection, shard_id: int # Id of the shard, snapshot_name: str # Name of the snapshot to download}
@returns(200) Snapshot file
@errors {4XX: error}

@endpoint GET /collections/{collection_name}/points/{id}
@desc Get point
@required {collection_name: str # Name of the collection to retrieve from, id: any # Id of the point}
@optional {consistency: any # Define read consistency guarantees for the operation}
@returns(200) {usage: any, time: num(float), status: str, result: map{id: any, payload: any, vector: any, shard_key: any, order_value: any}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points
@desc Get points
@required {collection_name: str # Name of the collection to retrieve from, ids: [any] # Look for points with ids}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, with_payload: any # Select which payload to return with the response. Default is true., with_vector: any # Options for specifying which vector to include}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}/points
@desc Upsert points
@required {collection_name: str # Name of the collection to update from}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/delete
@desc Delete points
@required {collection_name: str # Name of the collection to delete from}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}/points/vectors
@desc Update vectors
@required {collection_name: str # Name of the collection to update from, points: [map{id!: any, vector!: any}] # Points with named vectors}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation, shard_key: any, update_filter: any}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/vectors/delete
@desc Delete vectors
@required {collection_name: str # Name of the collection to delete from, vector: [str] # Vector names}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation, points: [any] # Deletes values from each point in this list, filter: any # Deletes values from points that satisfy this filter condition, shard_key: any}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/payload
@desc Set payload
@required {collection_name: str # Name of the collection to set from, payload: map}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation, points: [any] # Assigns payload to each point in this list, filter: any # Assigns payload to each point that satisfy this filter condition, shard_key: any, key: str # Assigns payload to each point that satisfy this path of property}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint PUT /collections/{collection_name}/points/payload
@desc Overwrite payload
@required {collection_name: str # Name of the collection to set from, payload: map}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation, points: [any] # Assigns payload to each point in this list, filter: any # Assigns payload to each point that satisfy this filter condition, shard_key: any, key: str # Assigns payload to each point that satisfy this path of property}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/payload/delete
@desc Delete payload
@required {collection_name: str # Name of the collection to delete from, keys: [str] # List of payload keys to remove from payload}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation, points: [any] # Deletes values from each point in this list, filter: any # Deletes values from points that satisfy this filter condition, shard_key: any}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/payload/clear
@desc Clear payload
@required {collection_name: str # Name of the collection to clear payload from}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation}
@returns(200) {usage: any, time: num(float), status: str, result: map{operation_id: int(uint64)?, status: str}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/batch
@desc Batch update points
@required {collection_name: str # Name of the collection to apply operations on, operations: [any]}
@optional {wait: bool # If true, wait for changes to actually happen, ordering: str # define ordering guarantees for the operation, timeout: int # Timeout for the operation}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/scroll
@desc Scroll points
@required {collection_name: str # Name of the collection to retrieve from}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, offset: any # Start ID to read points from., limit: int(uint) # Page size. Default: 10, filter: any # Look only for points which satisfies this conditions. If not provided - all points., with_payload: any # Select which payload to return with the response. Default is true., with_vector: any # Options for specifying which vector to include, order_by: any # Order the records by a payload field.}
@returns(200) {usage: any, time: num(float), status: str, result: map{points: [map], next_page_offset: any}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/search
@desc Search points
@required {collection_name: str # Name of the collection to search in, vector: any # Vector data separator for named and unnamed modes Unnamed mode:  { "vector": [1.0, 2.0, 3.0] }  or named mode:  { "vector": { "vector": [1.0, 2.0, 3.0], "name": "image-embeddings" } }, limit: int(uint) # Max number of result to return}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, filter: any # Look only for points which satisfies this conditions, params: any # Additional search params, offset: int(uint) # Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues., with_payload: any # Select which payload to return with the response. Default is false., with_vector: any=null # Options for specifying which vectors to include into response. Default is false., score_threshold: num(float) # Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned.}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/search/batch
@desc Search batch points
@required {collection_name: str # Name of the collection to search in, searches: [map{shard_key: any, vector!: any, filter: any, params: any, limit!: int(uint), offset: int(uint), with_payload: any, with_vector: any, score_threshold: num(float)}]}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds.}
@returns(200) {usage: any, time: num(float), status: str, result: [[map]]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/search/groups
@desc Search point groups
@required {collection_name: str # Name of the collection to search in, vector: any # Vector data separator for named and unnamed modes Unnamed mode:  { "vector": [1.0, 2.0, 3.0] }  or named mode:  { "vector": { "vector": [1.0, 2.0, 3.0], "name": "image-embeddings" } }, group_by: str # Payload field to group by, must be a string or number field. If the field contains more than 1 value, all values will be used for grouping. One point can be in multiple groups., group_size: int(uint32) # Maximum amount of points to return per group, limit: int(uint32) # Maximum amount of groups to return}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, filter: any # Look only for points which satisfies this conditions, params: any # Additional search params, with_payload: any # Select which payload to return with the response. Default is false., with_vector: any=null # Options for specifying which vectors to include into response. Default is false., score_threshold: num(float) # Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned., with_lookup: any # Look for points in another collection using the group ids}
@returns(200) {usage: any, time: num(float), status: str, result: map{groups: [map]}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/recommend
@desc Recommend points
@required {collection_name: str # Name of the collection to search in, limit: int(uint) # Max number of results to return}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, positive: [any]= # Look for vectors closest to those, negative: [any]= # Try to avoid vectors like this, strategy: any # How to use positive and negative examples to find the results, filter: any # Look only for points which satisfies this conditions, params: any # Additional search params, offset: int(uint) # Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues., with_payload: any # Select which payload to return with the response. Default is false., with_vector: any=null # Options for specifying which vectors to include into response. Default is false., score_threshold: num(float) # Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned., using: any=null # Define which vector to use for recommendation, if not specified - try to use default vector, lookup_from: any=null # The location used to lookup vectors. If not specified - use current collection. Note: the other collection should have the same vector size as the current collection}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/recommend/batch
@desc Recommend batch points
@required {collection_name: str # Name of the collection to search in, searches: [map{shard_key: any, positive: [any], negative: [any], strategy: any, filter: any, params: any, limit!: int(uint), offset: int(uint), with_payload: any, with_vector: any, score_threshold: num(float), using: any, lookup_from: any}]}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds.}
@returns(200) {usage: any, time: num(float), status: str, result: [[map]]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/recommend/groups
@desc Recommend point groups
@required {collection_name: str # Name of the collection to search in, group_by: str # Payload field to group by, must be a string or number field. If the field contains more than 1 value, all values will be used for grouping. One point can be in multiple groups., group_size: int(uint32) # Maximum amount of points to return per group, limit: int(uint32) # Maximum amount of groups to return}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, positive: [any]= # Look for vectors closest to those, negative: [any]= # Try to avoid vectors like this, strategy: any=null # How to use positive and negative examples to find the results, filter: any # Look only for points which satisfies this conditions, params: any # Additional search params, with_payload: any # Select which payload to return with the response. Default is false., with_vector: any=null # Options for specifying which vectors to include into response. Default is false., score_threshold: num(float) # Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned., using: any=null # Define which vector to use for recommendation, if not specified - try to use default vector, lookup_from: any=null # The location used to lookup vectors. If not specified - use current collection. Note: the other collection should have the same vector size as the current collection, with_lookup: any # Look for points in another collection using the group ids}
@returns(200) {usage: any, time: num(float), status: str, result: map{groups: [map]}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/discover
@desc Discover points
@required {collection_name: str # Name of the collection to search in, limit: int(uint) # Max number of results to return}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, target: any # Look for vectors closest to this.  When using the target (with or without context), the integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target., context: [map{positive!: any, negative!: any}] # Pairs of { positive, negative } examples to constrain the search.  When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair.  Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0.  For discovery search (when including a target), the context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise., filter: any # Look only for points which satisfies this conditions, params: any # Additional search params, offset: int(uint) # Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues., with_payload: any # Select which payload to return with the response. Default is false., with_vector: any # Options for specifying which vectors to include into response. Default is false., using: any=null # Define which vector to use for recommendation, if not specified - try to use default vector, lookup_from: any=null # The location used to lookup vectors. If not specified - use current collection. Note: the other collection should have the same vector size as the current collection}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/discover/batch
@desc Discover batch points
@required {collection_name: str # Name of the collection to search in, searches: [map{shard_key: any, target: any, context: [map], filter: any, params: any, limit!: int(uint), offset: int(uint), with_payload: any, with_vector: any, using: any, lookup_from: any}]}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds.}
@returns(200) {usage: any, time: num(float), status: str, result: [[map]]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/count
@desc Count points
@required {collection_name: str # Name of the collection to count in}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, filter: any # Look only for points which satisfies this conditions, exact: bool=true # If true, count exact number of points. If false, count approximate number of points faster. Approximate count might be unreliable during the indexing process. Default: true}
@returns(200) {usage: any, time: num(float), status: str, result: map{count: int(uint)}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/facet
@desc Facet a payload key with a given filter.
@required {collection_name: str # Name of the collection to facet in, key: str # Payload key to use for faceting.}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any, limit: int(uint) # Max number of hits to return. Default is 10., filter: any # Filter conditions - only consider points that satisfy these conditions., exact: bool # Whether to do a more expensive exact count for each of the values in the facet. Default is false.}
@returns(200) {usage: any, time: num(float), status: str, result: map{hits: [map]}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/query
@desc Query points
@required {collection_name: str # Name of the collection to query}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any, prefetch: any=null # Sub-requests to perform first. If present, the query will be performed on the results of the prefetch(es)., query: any # Query to perform. If missing without prefetches, returns points ordered by their IDs., using: str # Define which vector name to use for querying. If missing, the default vector is used., filter: any # Filter conditions - return only those points that satisfy the specified conditions., params: any # Search params for when there is no prefetch, score_threshold: num(float) # Return points with scores better than this threshold., limit: int(uint) # Max number of points to return. Default is 10., offset: int(uint) # Offset of the result. Skip this many points. Default is 0, with_vector: any # Options for specifying which vectors to include into the response. Default is false., with_payload: any # Options for specifying which payload to include or not. Default is false., lookup_from: any=null # The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector Note: the other collection vectors should have the same vector size as the 'using' vector in the current collection}
@returns(200) {usage: any, time: num(float), status: str, result: map{points: [map]}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/query/batch
@desc Query points in batch
@required {collection_name: str # Name of the collection to query, searches: [map{shard_key: any, prefetch: any, query: any, using: str, filter: any, params: any, score_threshold: num(float), limit: int(uint), offset: int(uint), with_vector: any, with_payload: any, lookup_from: any}]}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds.}
@returns(200) {usage: any, time: num(float), status: str, result: [map]} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/query/groups
@desc Query points, grouped by a given payload field
@required {collection_name: str # Name of the collection to query, group_by: str # Payload field to group by, must be a string or number field. If the field contains more than 1 value, all values will be used for grouping. One point can be in multiple groups.}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any, prefetch: any=null # Sub-requests to perform first. If present, the query will be performed on the results of the prefetch(es)., query: any # Query to perform. If missing without prefetches, returns points ordered by their IDs., using: str # Define which vector name to use for querying. If missing, the default vector is used., filter: any # Filter conditions - return only those points that satisfy the specified conditions., params: any # Search params for when there is no prefetch, score_threshold: num(float) # Return points with scores better than this threshold., with_vector: any # Options for specifying which vectors to include into the response. Default is false., with_payload: any # Options for specifying which payload to include or not. Default is false., lookup_from: any=null # The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector Note: the other collection vectors should have the same vector size as the 'using' vector in the current collection, group_size: int(uint) # Maximum amount of points to return per group. Default is 3., limit: int(uint) # Maximum amount of groups to return. Default is 10., with_lookup: any # Look for points in another collection using the group ids}
@returns(200) {usage: any, time: num(float), status: str, result: map{groups: [map]}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/search/matrix/pairs
@desc Search points matrix distance pairs
@required {collection_name: str # Name of the collection to search in}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, filter: any # Look only for points which satisfies this conditions, sample: int(uint) # How many points to select and search within. Default is 10., limit: int(uint) # How many neighbours per sample to find. Default is 3., using: str # Define which vector name to use for querying. If missing, the default vector is used.}
@returns(200) {usage: any, time: num(float), status: str, result: map{pairs: [map]}} # successful operation
@errors {4XX: error}

@endpoint POST /collections/{collection_name}/points/search/matrix/offsets
@desc Search points matrix distance offsets
@required {collection_name: str # Name of the collection to search in}
@optional {consistency: any # Define read consistency guarantees for the operation, timeout: int # If set, overrides global timeout for this request. Unit is seconds., shard_key: any # Specify in which shards to look for the points, if not specified - look in all shards, filter: any # Look only for points which satisfies this conditions, sample: int(uint) # How many points to select and search within. Default is 10., limit: int(uint) # How many neighbours per sample to find. Default is 3., using: str # Define which vector name to use for querying. If missing, the default vector is used.}
@returns(200) {usage: any, time: num(float), status: str, result: map{offsets_row: [int(uint64)], offsets_col: [int(uint64)], scores: [num(float)], ids: [any]}} # successful operation
@errors {4XX: error}

@endgroup

@end
