@lap v0.3
# Machine-readable API spec. Each @endpoint block is one API call.
@api Mistral AI API
@base https://api.mistral.ai
@version 1.0.0
@auth Bearer bearer
@endpoints 154
@hint download_for_search
@toc models(3), conversations(10), agents(12), conversations#stream(1), files(6), fine_tuning(8), batch(4), chat(3), fim(1), embeddings(1), moderations(1), ocr(1), classifications(1), audio(9), libraries(18), observability(41), workflows(34)

@group models
@endpoint GET /v1/models
@desc List Models
@optional {provider: any, model: any}
@returns(200) {object: str, data: [any]} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/models/{model_id}
@desc Retrieve Model
@required {model_id: str # The ID of the model to retrieve.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/models/{model_id}
@desc Delete Model
@required {model_id: str # The ID of the model to delete.}
@returns(200) {id: str, object: str, deleted: bool} # Successful Response
@errors {422: Validation Error}

@endgroup

@group conversations
@endpoint POST /v1/conversations
@desc Create a conversation and append entries to it.
@returns(200) {object: str, conversation_id: str, outputs: [any], usage: map{prompt_tokens: int, completion_tokens: int, total_tokens: int, connector_tokens: any, connectors: any}, guardrails: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/conversations
@desc List all created conversations.
@optional {page: int=0, page_size: int=100, metadata: any}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/conversations/{conversation_id}
@desc Retrieve a conversation information.
@required {conversation_id: str # ID of the conversation from which we are fetching metadata.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/conversations/{conversation_id}
@desc Delete a conversation.
@required {conversation_id: str # ID of the conversation to delete.}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/conversations/{conversation_id}
@desc Append new entries to an existing conversation.
@required {conversation_id: str # ID of the conversation to which we append entries.}
@returns(200) {object: str, conversation_id: str, outputs: [any], usage: map{prompt_tokens: int, completion_tokens: int, total_tokens: int, connector_tokens: any, connectors: any}, guardrails: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/conversations/{conversation_id}/history
@desc Retrieve all entries in a conversation.
@required {conversation_id: str # ID of the conversation from which we are fetching entries.}
@returns(200) {object: str, conversation_id: str, entries: [any]} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/conversations/{conversation_id}/messages
@desc Retrieve all messages in a conversation.
@required {conversation_id: str # ID of the conversation from which we are fetching messages.}
@returns(200) {object: str, conversation_id: str, messages: [any]} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/conversations/{conversation_id}/restart
@desc Restart a conversation starting from a given entry.
@required {conversation_id: str # ID of the original conversation which is being restarted.}
@returns(200) {object: str, conversation_id: str, outputs: [any], usage: map{prompt_tokens: int, completion_tokens: int, total_tokens: int, connector_tokens: any, connectors: any}, guardrails: any} # Successful Response
@errors {422: Validation Error}

@endgroup

@group agents
@endpoint POST /v1/agents
@desc Create an agent that can be used within a conversation.
@required {model: str, name: str}
@optional {instructions: any # Instruction prompt the model will follow during the conversation., tools: [any] # List of tools which are available to the model during the conversation., completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any} # White-listed arguments from the completion API, guardrails: any, description: any, handoffs: any, metadata: any, version_message: any}
@returns(200) {instructions: any, tools: [any], completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any}, guardrails: any, model: str, name: str, description: any, handoffs: any, metadata: any, object: str, id: str, version: int, versions: [int], created_at: str(date-time), updated_at: str(date-time), deployment_chat: bool, source: str, version_message: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/agents
@desc List agent entities.
@optional {page: int=0 # Page number (0-indexed), page_size: int=20 # Number of agents per page, deployment_chat: any, sources: any, name: any # Filter by agent name, search: any # Search agents by name or ID, id: any, metadata: any}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/agents/{agent_id}
@desc Retrieve an agent entity.
@required {agent_id: str}
@optional {agent_version: any}
@returns(200) {instructions: any, tools: [any], completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any}, guardrails: any, model: str, name: str, description: any, handoffs: any, metadata: any, object: str, id: str, version: int, versions: [int], created_at: str(date-time), updated_at: str(date-time), deployment_chat: bool, source: str, version_message: any} # Successful Response
@errors {422: Validation Error}

@endpoint PATCH /v1/agents/{agent_id}
@desc Update an agent entity.
@required {agent_id: str}
@optional {instructions: any # Instruction prompt the model will follow during the conversation., tools: [any] # List of tools which are available to the model during the conversation., completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any} # White-listed arguments from the completion API, guardrails: any, model: any, name: any, description: any, handoffs: any, deployment_chat: any, metadata: any, version_message: any}
@returns(200) {instructions: any, tools: [any], completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any}, guardrails: any, model: str, name: str, description: any, handoffs: any, metadata: any, object: str, id: str, version: int, versions: [int], created_at: str(date-time), updated_at: str(date-time), deployment_chat: bool, source: str, version_message: any} # Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/agents/{agent_id}
@desc Delete an agent entity.
@required {agent_id: str}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint PATCH /v1/agents/{agent_id}/version
@desc Update an agent version.
@required {agent_id: str, version: int}
@returns(200) {instructions: any, tools: [any], completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any}, guardrails: any, model: str, name: str, description: any, handoffs: any, metadata: any, object: str, id: str, version: int, versions: [int], created_at: str(date-time), updated_at: str(date-time), deployment_chat: bool, source: str, version_message: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/agents/{agent_id}/versions
@desc List all versions of an agent.
@required {agent_id: str}
@optional {page: int=0 # Page number (0-indexed), page_size: int=20 # Number of versions per page}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/agents/{agent_id}/versions/{version}
@desc Retrieve a specific version of an agent.
@required {agent_id: str, version: str}
@returns(200) {instructions: any, tools: [any], completion_args: map{stop: any, presence_penalty: any, frequency_penalty: any, temperature: any, top_p: any, max_tokens: any, random_seed: any, prediction: any, response_format: any, tool_choice: str, reasoning_effort: any}, guardrails: any, model: str, name: str, description: any, handoffs: any, metadata: any, object: str, id: str, version: int, versions: [int], created_at: str(date-time), updated_at: str(date-time), deployment_chat: bool, source: str, version_message: any} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/agents/{agent_id}/aliases
@desc Create or update an agent version alias.
@required {agent_id: str, alias: str, version: int}
@returns(200) {alias: str, version: int, created_at: str(date-time), updated_at: str(date-time)} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/agents/{agent_id}/aliases
@desc List all aliases for an agent.
@required {agent_id: str}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/agents/{agent_id}/aliases
@desc Delete an agent version alias.
@required {agent_id: str, alias: str}
@returns(204) Successful Response
@errors {422: Validation Error}

@endgroup

@group conversations#stream
@endpoint POST /v1/conversations#stream
@desc Create a conversation and append entries to it.
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group conversations
@endpoint POST /v1/conversations/{conversation_id}#stream
@desc Append new entries to an existing conversation.
@required {conversation_id: str # ID of the conversation to which we append entries.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/conversations/{conversation_id}/restart#stream
@desc Restart a conversation starting from a given entry.
@required {conversation_id: str # ID of the original conversation which is being restarted.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group files
@endpoint POST /v1/files
@desc Upload File
@returns(200) {id: str(uuid), object: str, bytes: int, created_at: int, filename: str, purpose: str, sample_type: str, num_lines: any, mimetype: any, source: str, signature: any, expires_at: any, visibility: any} # OK

@endpoint GET /v1/files
@desc List Files
@optional {page: int=0, page_size: int=100, include_total: bool=true, sample_type: any, source: any, search: any, purpose: any, mimetypes: any}
@returns(200) {data: [map], object: str, total: any} # OK

@endpoint GET /v1/files/{file_id}
@desc Retrieve File
@required {file_id: str(uuid)}
@returns(200) {id: str(uuid), object: str, bytes: int, created_at: int, filename: str, purpose: str, sample_type: str, num_lines: any, mimetype: any, source: str, signature: any, expires_at: any, visibility: any, deleted: bool} # OK

@endpoint DELETE /v1/files/{file_id}
@desc Delete File
@required {file_id: str(uuid)}
@returns(200) {id: str(uuid), object: str, deleted: bool} # OK

@endpoint GET /v1/files/{file_id}/content
@desc Download File
@required {file_id: str(uuid)}
@returns(200) OK

@endpoint GET /v1/files/{file_id}/url
@desc Get Signed Url
@required {file_id: str(uuid)}
@optional {expiry: int=24 # Number of hours before the url becomes invalid. Defaults to 24h}
@returns(200) {url: str} # OK

@endgroup

@group fine_tuning
@endpoint GET /v1/fine_tuning/jobs
@desc Get Fine Tuning Jobs
@optional {page: int=0 # The page number of the results to be returned., page_size: int=100 # The number of items to return per page., model: any # The model name used for fine-tuning to filter on. When set, the other results are not displayed., created_after: any # The date/time to filter on. When set, the results for previous creation times are not displayed., created_before: any, created_by_me: bool=false # When set, only return results for jobs created by the API caller. Other results are not displayed., status: any # The current job state to filter on. When set, the other results are not displayed., wandb_project: any # The Weights and Biases project to filter on. When set, the other results are not displayed., wandb_name: any # The Weights and Biases run name to filter on. When set, the other results are not displayed., suffix: any # The model suffix to filter on. When set, the other results are not displayed.}
@returns(200) {data: [any], object: str, total: int} # OK

@endpoint POST /v1/fine_tuning/jobs
@desc Create Fine Tuning Job
@required {model: str, hyperparameters: any}
@optional {dry_run: any # * If `true` the job is not spawned, instead the query returns a handful of useful metadata   for the user to perform sanity checks (see `LegacyJobMetadataOut` response). * Otherwise, the job is started and the query returns the job ID along with some of the   input parameters (see `JobOut` response)., training_files: [map{file_id!: str(uuid), weight: num}]=, validation_files: any # A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files., suffix: any # A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`, integrations: any # A list of integrations to enable for your fine-tuning job., auto_start: bool # This field will be required in a future release., invalid_sample_skip_percentage: num=0, job_type: any, repositories: any, classifier_targets: any}
@returns(200) OK

@endpoint GET /v1/fine_tuning/jobs/{job_id}
@desc Get Fine Tuning Job
@required {job_id: str(uuid) # The ID of the job to analyse.}
@returns(200) OK

@endpoint POST /v1/fine_tuning/jobs/{job_id}/cancel
@desc Cancel Fine Tuning Job
@required {job_id: str(uuid) # The ID of the job to cancel.}
@returns(200) OK

@endpoint POST /v1/fine_tuning/jobs/{job_id}/start
@desc Start Fine Tuning Job
@required {job_id: str(uuid)}
@returns(200) OK

@endpoint PATCH /v1/fine_tuning/models/{model_id}
@desc Update Fine Tuned Model
@required {model_id: str # The ID of the model to update.}
@optional {name: any, description: any}
@returns(200) OK

@endpoint POST /v1/fine_tuning/models/{model_id}/archive
@desc Archive Fine Tuned Model
@required {model_id: str # The ID of the model to archive.}
@returns(200) {id: str, object: str, archived: bool} # OK

@endpoint DELETE /v1/fine_tuning/models/{model_id}/archive
@desc Unarchive Fine Tuned Model
@required {model_id: str # The ID of the model to unarchive.}
@returns(200) {id: str, object: str, archived: bool} # OK

@endgroup

@group batch
@endpoint GET /v1/batch/jobs
@desc Get Batch Jobs
@optional {page: int=0, page_size: int=100, model: any, agent_id: any, metadata: any, created_after: any, created_by_me: bool=false, status: any, order_by: str(created/-created)=-created}
@returns(200) {data: [map], object: str, total: int} # OK

@endpoint POST /v1/batch/jobs
@desc Create Batch Job
@required {endpoint: str(/v1/chat/completions//v1/embeddings//v1/fim/completions//v1/moderations//v1/chat/moderations//v1/ocr//v1/classifications//v1/chat/classifications//v1/conversations//v1/audio/transcriptions)}
@optional {input_files: any # The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ```, requests: any, model: any # The model to be used for batch inference., agent_id: any # In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here., metadata: any # The metadata of your choice to be associated with the batch inference job., timeout_hours: int=24 # The timeout in hours for the batch inference job.}
@returns(200) {id: str, object: str, input_files: [str(uuid)], metadata: any, endpoint: str, model: any, agent_id: any, output_file: any, error_file: any, errors: [map], outputs: any, status: str, created_at: int, total_requests: int, completed_requests: int, succeeded_requests: int, failed_requests: int, started_at: any, completed_at: any} # OK

@endpoint GET /v1/batch/jobs/{job_id}
@desc Get Batch Job
@required {job_id: str(uuid)}
@optional {inline: any}
@returns(200) {id: str, object: str, input_files: [str(uuid)], metadata: any, endpoint: str, model: any, agent_id: any, output_file: any, error_file: any, errors: [map], outputs: any, status: str, created_at: int, total_requests: int, completed_requests: int, succeeded_requests: int, failed_requests: int, started_at: any, completed_at: any} # OK

@endpoint POST /v1/batch/jobs/{job_id}/cancel
@desc Cancel Batch Job
@required {job_id: str(uuid)}
@returns(200) {id: str, object: str, input_files: [str(uuid)], metadata: any, endpoint: str, model: any, agent_id: any, output_file: any, error_file: any, errors: [map], outputs: any, status: str, created_at: int, total_requests: int, completed_requests: int, succeeded_requests: int, failed_requests: int, started_at: any, completed_at: any} # OK

@endgroup

@group chat
@endpoint POST /v1/chat/completions
@desc Chat Completion
@required {model: str # ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions., messages: [any] # The prompt(s) to generate completions for, encoded as a list of dict with role and content.}
@optional {temperature: any # What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value., top_p: num=1 # Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both., max_tokens: any # The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length., stream: bool=false # Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON., stop: any # Stop generation if this token is detected. Or if one of these tokens is detected when providing an array, random_seed: any # The seed to use for random sampling. If set, different calls will generate deterministic results., metadata: any, response_format: map{type: str, json_schema: any} # Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide., tools: any # A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for., tool_choice: any=auto # Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool., presence_penalty: num=0 # The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative., frequency_penalty: num=0 # The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition., n: any # Number of completions to return for each request, input tokens are only billed once., prediction: map{type: str, content: str} # Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content., parallel_tool_calls: bool=true # Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel., prompt_mode: any # Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
**Deprecated for reasoning models - use `reasoning_effort` parameter instead.**, reasoning_effort: str(high/none) # Controls the reasoning effort level for reasoning models. "high" enables comprehensive reasoning traces, "none" disables reasoning effort., guardrails: any=null # A list of guardrail configurations to apply to this request. Each guardrail specifies a moderation type, categories with thresholds to evaluate, and an action to take on violation., safe_prompt: bool=false # Whether to inject a safety prompt before all conversations.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group fim
@endpoint POST /v1/fim/completions
@desc Fim Completion
@required {model: str=codestral-2404 # ID of the model with FIM to use., prompt: str # The text/code to complete.}
@optional {temperature: any # What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value., top_p: num=1 # Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both., max_tokens: any # The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length., stream: bool=false # Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON., stop: any # Stop generation if this token is detected. Or if one of these tokens is detected when providing an array, random_seed: any # The seed to use for random sampling. If set, different calls will generate deterministic results., metadata: any, suffix: any= # Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`., min_tokens: any # The minimum number of tokens to generate in the completion.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group agents
@endpoint POST /v1/agents/completions
@desc Agents Completion
@required {messages: [any] # The prompt(s) to generate completions for, encoded as a list of dict with role and content., agent_id: str # The ID of the agent to use for this completion.}
@optional {max_tokens: any # The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length., stream: bool=false # Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON., stop: any # Stop generation if this token is detected. Or if one of these tokens is detected when providing an array, random_seed: any # The seed to use for random sampling. If set, different calls will generate deterministic results., metadata: any, response_format: map{type: str, json_schema: any} # Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide., tools: any, tool_choice: any=auto, presence_penalty: num=0 # The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative., frequency_penalty: num=0 # The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition., n: any # Number of completions to return for each request, input tokens are only billed once., prediction: map{type: str, content: str} # Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content., parallel_tool_calls: bool=true, prompt_mode: any # Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. **Deprecated for reasoning models - use `reasoning_effort` parameter instead.**, reasoning_effort: str(high/none) # Controls the reasoning effort level for reasoning models. "high" enables comprehensive reasoning traces, "none" disables reasoning effort.}
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group embeddings
@endpoint POST /v1/embeddings
@desc Embeddings
@required {model: str # ID of the model to use., input: any # Text to embed.}
@optional {metadata: any, output_dimension: any # The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used., output_dtype: str(float/int8/uint8/binary/ubinary), encoding_format: str(float/base64)}
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group moderations
@endpoint POST /v1/moderations
@desc Moderations
@required {model: str # ID of the model to use., input: any # Text to classify.}
@optional {metadata: any}
@returns(200) {id: str, model: str, results: [map]} # Successful Response
@errors {422: Validation Error}

@endgroup

@group chat
@endpoint POST /v1/chat/moderations
@desc Chat Moderations
@required {input: any # Chat to classify, model: str}
@returns(200) {id: str, model: str, results: [map]} # Successful Response
@errors {422: Validation Error}

@endgroup

@group ocr
@endpoint POST /v1/ocr
@desc OCR
@required {model: any, document: any # Document to run OCR on}
@optional {id: str, pages: any # Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0, include_image_base64: any # Include image URLs in response, image_limit: any # Max images to extract, image_min_size: any # Minimum height and width of image to extract, bbox_annotation_format: any # Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field, document_annotation_format: any # Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field, document_annotation_prompt: any # Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided., table_format: any, extract_header: bool=false, extract_footer: bool=false}
@returns(200) {pages: [map], model: str, document_annotation: any, usage_info: map{pages_processed: int, doc_size_bytes: any}} # Successful Response
@errors {422: Validation Error}

@endgroup

@group classifications
@endpoint POST /v1/classifications
@desc Classifications
@required {model: str # ID of the model to use., input: any # Text to classify.}
@optional {metadata: any}
@returns(200) {id: str, model: str, results: [map]} # Successful Response
@errors {422: Validation Error}

@endgroup

@group chat
@endpoint POST /v1/chat/classifications
@desc Chat Classifications
@required {model: str, input: any # Chat to classify}
@returns(200) {id: str, model: str, results: [map]} # Successful Response
@errors {422: Validation Error}

@endgroup

@group audio
@endpoint POST /v1/audio/transcriptions
@desc Create Transcription
@returns(200) {model: str, text: str, language: any, segments: [map], usage: map{prompt_tokens: int, completion_tokens: int, total_tokens: int, prompt_audio_seconds: any, num_cached_tokens: any, prompt_tokens_details: any, prompt_token_details: any}} # Successful Response

@endpoint POST /v1/audio/transcriptions#stream
@desc Create Streaming Transcription (SSE)
@returns(200) Stream of transcription events

@endpoint POST /v1/audio/speech
@desc Speech
@required {input: str # Text to generate speech from.}
@optional {model: any, stream: bool=false, voice_id: any # The preset or custom voice to use for generating the speech., ref_audio: any # The base64-encoded audio reference for zero-shot voice cloning., response_format: str(pcm/wav/mp3/flac/opus)}
@returns(200) {audio_data: str} # Speech audio data.
@errors {422: Validation Error}

@endpoint GET /v1/audio/voices
@desc List all voices
@optional {limit: int=10 # Maximum number of voices to return, offset: int=0 # Offset for pagination}
@returns(200) {items: [map], total: int, page: int, page_size: int, total_pages: int} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/audio/voices
@desc Create a new voice
@required {name: str, sample_audio: str # Base64-encoded audio file}
@optional {slug: any, languages: [str]=, gender: any, age: any, tags: any, color: any, retention_notice: int=30, sample_filename: any # Original filename for extension detection}
@returns(200) {name: str, slug: any, languages: [str], gender: any, age: any, tags: any, color: any, retention_notice: int, id: str(uuid), created_at: str(date-time), user_id: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/audio/voices/{voice_id}
@desc Get voice details
@required {voice_id: str}
@returns(200) {name: str, slug: any, languages: [str], gender: any, age: any, tags: any, color: any, retention_notice: int, id: str(uuid), created_at: str(date-time), user_id: any} # Successful Response
@errors {422: Validation Error}

@endpoint PATCH /v1/audio/voices/{voice_id}
@desc Update voice metadata
@required {voice_id: str(uuid)}
@optional {name: any, languages: any, gender: any, age: any, tags: any}
@returns(200) {name: str, slug: any, languages: [str], gender: any, age: any, tags: any, color: any, retention_notice: int, id: str(uuid), created_at: str(date-time), user_id: any} # Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/audio/voices/{voice_id}
@desc Delete a custom voice
@required {voice_id: str(uuid)}
@returns(200) {name: str, slug: any, languages: [str], gender: any, age: any, tags: any, color: any, retention_notice: int, id: str(uuid), created_at: str(date-time), user_id: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/audio/voices/{voice_id}/sample
@desc Get voice sample audio
@required {voice_id: str}
@returns(200) Successful Response
@errors {422: Validation Error}

@endgroup

@group libraries
@endpoint GET /v1/libraries
@desc List all libraries you have access to.
@returns(200) {data: [map]} # Successful Response

@endpoint POST /v1/libraries
@desc Create a new Library.
@required {name: str}
@optional {description: any, chunk_size: any}
@returns(201) {id: str(uuid), name: str, created_at: str(date-time), updated_at: str(date-time), owner_id: any, owner_type: str, total_size: int, nb_documents: int, chunk_size: any, emoji: any, description: any, generated_description: any, explicit_user_members_count: any, explicit_workspace_members_count: any, org_sharing_role: any, generated_name: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}
@desc Detailed information about a specific Library.
@required {library_id: str(uuid)}
@returns(200) {id: str(uuid), name: str, created_at: str(date-time), updated_at: str(date-time), owner_id: any, owner_type: str, total_size: int, nb_documents: int, chunk_size: any, emoji: any, description: any, generated_description: any, explicit_user_members_count: any, explicit_workspace_members_count: any, org_sharing_role: any, generated_name: any} # Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/libraries/{library_id}
@desc Delete a library and all of its documents.
@required {library_id: str(uuid)}
@returns(200) {id: str(uuid), name: str, created_at: str(date-time), updated_at: str(date-time), owner_id: any, owner_type: str, total_size: int, nb_documents: int, chunk_size: any, emoji: any, description: any, generated_description: any, explicit_user_members_count: any, explicit_workspace_members_count: any, org_sharing_role: any, generated_name: any} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/libraries/{library_id}
@desc Update a library.
@required {library_id: str(uuid)}
@optional {name: any, description: any}
@returns(200) {id: str(uuid), name: str, created_at: str(date-time), updated_at: str(date-time), owner_id: any, owner_type: str, total_size: int, nb_documents: int, chunk_size: any, emoji: any, description: any, generated_description: any, explicit_user_members_count: any, explicit_workspace_members_count: any, org_sharing_role: any, generated_name: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/documents
@desc List documents in a given library.
@required {library_id: str(uuid)}
@optional {search: any, page_size: int=100, page: int=0, filters_attributes: any, sort_by: str=created_at, sort_order: str=desc}
@returns(200) {pagination: map{total_items: int, total_pages: int, current_page: int, page_size: int, has_more: bool}, data: [map]} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/libraries/{library_id}/documents
@desc Upload a new document.
@required {library_id: str(uuid)}
@returns(200) {id: str(uuid), library_id: str(uuid), hash: any, mime_type: any, extension: any, size: any, name: str, summary: any, created_at: str(date-time), last_processed_at: any, number_of_pages: any, process_status: str, uploaded_by_id: any, uploaded_by_type: str, tokens_processing_main_content: any, tokens_processing_summary: any, url: any, attributes: any, processing_status: str, tokens_processing_total: int} # A document with the same hash was found in this library. Returns the existing document.
@returns(201) {id: str(uuid), library_id: str(uuid), hash: any, mime_type: any, extension: any, size: any, name: str, summary: any, created_at: str(date-time), last_processed_at: any, number_of_pages: any, process_status: str, uploaded_by_id: any, uploaded_by_type: str, tokens_processing_main_content: any, tokens_processing_summary: any, url: any, attributes: any, processing_status: str, tokens_processing_total: int} # Upload successful, returns the created document's information.
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/documents/{document_id}
@desc Retrieve the metadata of a specific document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(200) {id: str(uuid), library_id: str(uuid), hash: any, mime_type: any, extension: any, size: any, name: str, summary: any, created_at: str(date-time), last_processed_at: any, number_of_pages: any, process_status: str, uploaded_by_id: any, uploaded_by_type: str, tokens_processing_main_content: any, tokens_processing_summary: any, url: any, attributes: any, processing_status: str, tokens_processing_total: int} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/libraries/{library_id}/documents/{document_id}
@desc Update the metadata of a specific document.
@required {library_id: str(uuid), document_id: str(uuid)}
@optional {name: any, attributes: any}
@returns(200) {id: str(uuid), library_id: str(uuid), hash: any, mime_type: any, extension: any, size: any, name: str, summary: any, created_at: str(date-time), last_processed_at: any, number_of_pages: any, process_status: str, uploaded_by_id: any, uploaded_by_type: str, tokens_processing_main_content: any, tokens_processing_summary: any, url: any, attributes: any, processing_status: str, tokens_processing_total: int} # Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/libraries/{library_id}/documents/{document_id}
@desc Delete a document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/documents/{document_id}/text_content
@desc Retrieve the text content of a specific document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(200) {text: str} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/documents/{document_id}/status
@desc Retrieve the processing status of a specific document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(200) {document_id: str(uuid), process_status: str, processing_status: str} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/documents/{document_id}/signed-url
@desc Retrieve the signed URL of a specific document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url
@desc Retrieve the signed URL of text extracted from a given document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/libraries/{library_id}/documents/{document_id}/reprocess
@desc Reprocess a document.
@required {library_id: str(uuid), document_id: str(uuid)}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/libraries/{library_id}/share
@desc List all of the access to this library.
@required {library_id: str(uuid)}
@returns(200) {data: [map]} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/libraries/{library_id}/share
@desc Create or update an access level.
@required {library_id: str(uuid), level: str(Viewer/Editor), share_with_uuid: str(uuid) # The id of the entity (user, workspace or organization) to share with, share_with_type: str(User/Workspace/Org) # The type of entity, used to share a library.}
@optional {org_id: any}
@returns(200) {library_id: str(uuid), user_id: any, org_id: str(uuid), role: str, share_with_type: str, share_with_uuid: any} # Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/libraries/{library_id}/share
@desc Delete an access level.
@required {library_id: str(uuid), share_with_uuid: str(uuid) # The id of the entity (user, workspace or organization) to share with, share_with_type: str(User/Workspace/Org) # The type of entity, used to share a library.}
@optional {org_id: any}
@returns(200) {library_id: str(uuid), user_id: any, org_id: str(uuid), role: str, share_with_type: str, share_with_uuid: any} # Successful Response
@errors {422: Validation Error}

@endgroup

@group observability
@endpoint POST /v1/observability/chat-completion-events/search
@desc Get Chat Completion Events
@required {search_params: map{filters!: any}}
@optional {page_size: int=50, cursor: any, extra_fields: any}
@returns(200) {completion_events: map{results: [map], next: any, cursor: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/chat-completion-events/search-ids
@desc Alternative to /search that returns only the IDs and that can return many IDs at once
@required {search_params: map{filters!: any}}
@optional {extra_fields: any}
@returns(200) {completion_event_ids: [str]} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/chat-completion-events/{event_id}
@desc Get Chat Completion Event
@required {event_id: str(uuid)}
@returns(200) {event_id: str, correlation_id: str, created_at: str(date-time), extra_fields: map, nb_input_tokens: int, nb_output_tokens: int, enabled_tools: [map], request_messages: [map], response_messages: [map], nb_messages: int, chat_transcription_events: [map]} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/chat-completion-events/{event_id}/similar-events
@desc Get Similar Chat Completion Events
@required {event_id: str(uuid)}
@returns(200) {completion_events: map{results: [map], next: any, cursor: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/chat-completion-fields
@desc Get Chat Completion Fields
@returns(200) {field_definitions: [map], field_groups: [map]} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/chat-completion-fields/{field_name}/options
@desc Get Chat Completion Field Options
@required {field_name: str, operator: str(lt/lte/gt/gte/startswith/istartswith/endswith/iendswith/contains/icontains/matches/notcontains/inotcontains/eq/neq/isnull/includes/excludes/len_eq) # The operator to use for filtering options}
@returns(200) {options: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/chat-completion-fields/{field_name}/options-counts
@desc Get Chat Completion Field Options Counts
@required {field_name: str}
@optional {filter_params: any}
@returns(200) {counts: [map]} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/chat-completion-events/{event_id}/live-judging
@desc Run Judge on an event based on the given options
@required {event_id: str(uuid), judge_definition: map{name!: str, description!: str, model_name!: str, output!: any, instructions!: str, tools!: [str]}}
@returns(200) {analysis: str, answer: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/judges
@desc Create a new judge
@required {name: str, description: str, model_name: str, output: any, instructions: str, tools: [str]}
@returns(201) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, owner_id: str(uuid), workspace_id: str(uuid), name: str, description: str, model_name: str, output: any, instructions: str, tools: [str], up_revision: any, down_revision: any, base_revision: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/judges
@desc Get judges with optional filtering and search
@optional {type_filter: any # Filter by judge output types, model_filter: any # Filter by model names, page_size: int=50, page: int=1, q: any}
@returns(200) {judges: map{results: [map], count: int, next: any, previous: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/judges/{judge_id}
@desc Get judge by id
@required {judge_id: str(uuid)}
@returns(200) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, owner_id: str(uuid), workspace_id: str(uuid), name: str, description: str, model_name: str, output: any, instructions: str, tools: [str], up_revision: any, down_revision: any, base_revision: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint DELETE /v1/observability/judges/{judge_id}
@desc Delete a judge
@required {judge_id: str(uuid)}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint PUT /v1/observability/judges/{judge_id}
@desc Update a judge
@required {judge_id: str(uuid), name: str, description: str, model_name: str, output: any, instructions: str, tools: [str]}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/judges/{judge_id}/live-judging
@desc Run a saved judge on a conversation
@required {judge_id: str(uuid), messages: [map]}
@optional {properties: any}
@returns(200) {analysis: str, answer: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/campaigns
@desc Create and start a new campaign
@required {search_params: map{filters!: any}, judge_id: str(uuid), name: str, description: str, max_nb_events: int}
@returns(201) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, name: str, owner_id: str(uuid), workspace_id: str(uuid), description: str, max_nb_events: int, search_params: map{filters: any}, judge: map{id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, owner_id: str(uuid), workspace_id: str(uuid), name: str, description: str, model_name: str, output: any, instructions: str, tools: [str], up_revision: any, down_revision: any, base_revision: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/campaigns
@desc Get all campaigns
@optional {page_size: int=50, page: int=1, q: any}
@returns(200) {campaigns: map{results: [map], count: int, next: any, previous: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/campaigns/{campaign_id}
@desc Get campaign by id
@required {campaign_id: str(uuid)}
@returns(200) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, name: str, owner_id: str(uuid), workspace_id: str(uuid), description: str, max_nb_events: int, search_params: map{filters: any}, judge: map{id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, owner_id: str(uuid), workspace_id: str(uuid), name: str, description: str, model_name: str, output: any, instructions: str, tools: [str], up_revision: any, down_revision: any, base_revision: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint DELETE /v1/observability/campaigns/{campaign_id}
@desc Delete a campaign
@required {campaign_id: str(uuid)}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/campaigns/{campaign_id}/status
@desc Get campaign status by campaign id
@required {campaign_id: str(uuid)}
@returns(200) {status: str} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/campaigns/{campaign_id}/selected-events
@desc Get event ids that were selected by the given campaign
@required {campaign_id: str(uuid)}
@optional {page_size: int=50, page: int=1}
@returns(200) {completion_events: map{results: [map], count: int, next: any, previous: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets
@desc Create a new empty dataset
@required {name: str, description: str}
@returns(201) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, name: str, description: str, owner_id: str(uuid), workspace_id: str(uuid)} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/datasets
@desc List existing datasets
@optional {page_size: int=50, page: int=1, q: any}
@returns(200) {datasets: map{results: [map], count: int, next: any, previous: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/datasets/{dataset_id}
@desc Get dataset by id
@required {dataset_id: str(uuid)}
@returns(200) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, name: str, description: str, owner_id: str(uuid), workspace_id: str(uuid)} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint DELETE /v1/observability/datasets/{dataset_id}
@desc Delete a dataset
@required {dataset_id: str(uuid)}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint PATCH /v1/observability/datasets/{dataset_id}
@desc Patch dataset
@required {dataset_id: str(uuid)}
@optional {name: any, description: any}
@returns(200) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, name: str, description: str, owner_id: str(uuid), workspace_id: str(uuid)} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/datasets/{dataset_id}/records
@desc List existing records in the dataset
@required {dataset_id: str(uuid)}
@optional {page_size: int=50, page: int=1}
@returns(200) {records: map{results: [map], count: int, next: any, previous: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets/{dataset_id}/records
@desc Add a conversation to the dataset
@required {dataset_id: str(uuid), payload: map{messages!: [map]}, properties: map}
@returns(201) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, dataset_id: str(uuid), payload: map{messages: [map]}, properties: map, source: str} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets/{dataset_id}/imports/from-campaign
@desc Populate the dataset with a campaign
@required {dataset_id: str(uuid), campaign_id: str(uuid)}
@returns(202) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, creator_id: str(uuid), dataset_id: str(uuid), workspace_id: str(uuid), status: str, progress: any, message: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets/{dataset_id}/imports/from-explorer
@desc Populate the dataset with samples from the explorer
@required {dataset_id: str(uuid), completion_event_ids: [str]}
@returns(202) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, creator_id: str(uuid), dataset_id: str(uuid), workspace_id: str(uuid), status: str, progress: any, message: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets/{dataset_id}/imports/from-file
@desc Populate the dataset with samples from an uploaded file
@required {dataset_id: str(uuid), file_id: str}
@returns(202) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, creator_id: str(uuid), dataset_id: str(uuid), workspace_id: str(uuid), status: str, progress: any, message: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets/{dataset_id}/imports/from-playground
@desc Populate the dataset with samples from the playground
@required {dataset_id: str(uuid), conversation_ids: [str]}
@returns(202) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, creator_id: str(uuid), dataset_id: str(uuid), workspace_id: str(uuid), status: str, progress: any, message: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/datasets/{dataset_id}/imports/from-dataset
@desc Populate the dataset with samples from another dataset
@required {dataset_id: str(uuid), dataset_record_ids: [str(uuid)]}
@returns(202) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, creator_id: str(uuid), dataset_id: str(uuid), workspace_id: str(uuid), status: str, progress: any, message: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/datasets/{dataset_id}/exports/to-jsonl
@desc Export to the Files API and retrieve presigned URL to download the resulting JSONL file
@required {dataset_id: str(uuid)}
@returns(200) {file_url: str} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/datasets/{dataset_id}/tasks/{task_id}
@desc Get status of a dataset import task
@required {dataset_id: str(uuid), task_id: str(uuid)}
@returns(200) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, creator_id: str(uuid), dataset_id: str(uuid), workspace_id: str(uuid), status: str, progress: any, message: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/datasets/{dataset_id}/tasks
@desc List import tasks for the given dataset
@required {dataset_id: str(uuid)}
@optional {page_size: int=50, page: int=1}
@returns(200) {tasks: map{results: [map], count: int, next: any, previous: any}} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint GET /v1/observability/dataset-records/{dataset_record_id}
@desc Get the content of a given conversation from a dataset
@required {dataset_record_id: str(uuid)}
@returns(200) {id: str(uuid), created_at: str(date-time), updated_at: str(date-time), deleted_at: any, dataset_id: str(uuid), payload: map{messages: [map]}, properties: map, source: str} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint DELETE /v1/observability/dataset-records/{dataset_record_id}
@desc Delete a record from a dataset
@required {dataset_record_id: str(uuid)}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/dataset-records/bulk-delete
@desc Delete multiple records from datasets
@required {dataset_record_ids: [str(uuid)]}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint POST /v1/observability/dataset-records/{dataset_record_id}/live-judging
@desc Run Judge on a dataset record based on the given options
@required {dataset_record_id: str(uuid), judge_definition: map{name!: str, description!: str, model_name!: str, output!: any, instructions!: str, tools!: [str]}}
@returns(200) {analysis: str, answer: any} # Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint PUT /v1/observability/dataset-records/{dataset_record_id}/payload
@desc Update a dataset record conversation payload
@required {dataset_record_id: str(uuid), payload: map{messages!: [map]}}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endpoint PUT /v1/observability/dataset-records/{dataset_record_id}/properties
@desc Update conversation properties
@required {dataset_record_id: str(uuid), properties: map}
@returns(204) Successful Response
@errors {400: Bad Request - Invalid request parameters or data, 404: Not Found - Resource does not exist, 408: Request Timeout - Operation timed out, 409: Conflict - Resource conflict, 422: Unprocessable Entity - Validation error}

@endgroup

@group workflows
@endpoint GET /v1/workflows/executions/{execution_id}
@desc Get Workflow Execution
@required {execution_id: str}
@returns(200) {workflow_name: str, execution_id: str, parent_execution_id: any, root_execution_id: str, status: any, start_time: str(date-time), end_time: any, total_duration_ms: any, result: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/executions/{execution_id}/history
@desc Get Workflow Execution History
@required {execution_id: str}
@optional {decode_payloads: bool=false}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/{execution_id}/signals
@desc Signal Workflow Execution
@required {execution_id: str, name: str # The name of the signal to send}
@optional {input: any # Input data for the signal, matching its schema}
@returns(202) {message: str} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/{execution_id}/queries
@desc Query Workflow Execution
@required {execution_id: str, name: str # The name of the query to request}
@optional {input: any # Input data for the query, matching its schema}
@returns(200) {query_name: str, result: any} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/{execution_id}/terminate
@desc Terminate Workflow Execution
@required {execution_id: str}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/terminate
@desc Batch Terminate Workflow Executions
@required {execution_ids: [str] # List of execution IDs to process}
@returns(200) {results: map} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/{execution_id}/cancel
@desc Cancel Workflow Execution
@required {execution_id: str}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/cancel
@desc Batch Cancel Workflow Executions
@required {execution_ids: [str] # List of execution IDs to process}
@returns(200) {results: map} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/{execution_id}/reset
@desc Reset Workflow
@required {execution_id: str, event_id: int # The event ID to reset the workflow execution to}
@optional {reason: any # Reason for resetting the workflow execution, exclude_signals: bool=false # Whether to exclude signals that happened after the reset point, exclude_updates: bool=false # Whether to exclude updates that happened after the reset point}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/executions/{execution_id}/updates
@desc Update Workflow Execution
@required {execution_id: str, name: str # The name of the update to request}
@optional {input: any # Input data for the update, matching its schema}
@returns(200) {update_name: str, result: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/executions/{execution_id}/trace/otel
@desc Get Workflow Execution Trace Otel
@required {execution_id: str}
@returns(200) {workflow_name: str, execution_id: str, parent_execution_id: any, root_execution_id: str, status: any, start_time: str(date-time), end_time: any, total_duration_ms: any, result: any, data_source: str, otel_trace_id: any, otel_trace_data: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/executions/{execution_id}/trace/summary
@desc Get Workflow Execution Trace Summary
@required {execution_id: str}
@returns(200) {workflow_name: str, execution_id: str, parent_execution_id: any, root_execution_id: str, status: any, start_time: str(date-time), end_time: any, total_duration_ms: any, result: any, span_tree: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/executions/{execution_id}/trace/events
@desc Get Workflow Execution Trace Events
@required {execution_id: str}
@optional {merge_same_id_events: bool=false, include_internal_events: bool=false}
@returns(200) {workflow_name: str, execution_id: str, parent_execution_id: any, root_execution_id: str, status: any, start_time: str(date-time), end_time: any, total_duration_ms: any, result: any, events: [any]} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/executions/{execution_id}/stream
@desc Stream workflow execution events as Server-Sent Events (SSE)
@required {execution_id: str}
@optional {event_source: any, last_event_id: any}
@returns(200) Stream of Server-Sent Events (SSE)
@errors {422: Validation Error}

@endpoint GET /v1/workflows/{workflow_name}/metrics
@desc Get Workflow Metrics
@required {workflow_name: str}
@optional {start_time: any # Filter workflows started after this time (ISO 8601), end_time: any # Filter workflows started before this time (ISO 8601)}
@returns(200) {execution_count: map{value: any}, success_count: map{value: any}, error_count: map{value: any}, average_latency_ms: map{value: any}, latency_over_time: map{value: [[any]]}, retry_rate: map{value: any}} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/runs
@desc List Runs
@optional {workflow_identifier: any # Filter by workflow name or id, search: any # Search by workflow name, display name or id, status: any # Filter by workflow status, page_size: int=50 # Number of items per page, next_page_token: any # Token for the next page of results}
@returns(200) {executions: [map], next_page_token: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/runs/{run_id}
@desc Get Run
@required {run_id: str(uuid)}
@returns(200) {workflow_name: str, execution_id: str, parent_execution_id: any, root_execution_id: str, status: any, start_time: str(date-time), end_time: any, total_duration_ms: any, result: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/runs/{run_id}/history
@desc Get Run History
@required {run_id: str(uuid)}
@optional {decode_payloads: bool=false}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/schedules
@desc Get Schedules
@returns(200) {schedules: [map]} # Successful Response

@endpoint POST /v1/workflows/schedules
@desc Schedule Workflow
@required {schedule: map{input!: any, calendars: [map], intervals: [map], cron_expressions: [str], skip: [map], start_at: any, end_at: any, jitter: any, time_zone_name: any, policy: map, schedule_id: any} # Specification of the times scheduled actions may occur. The times are the union of calendars, intervals, and cron_expressions, excluding anything in skip. schedule_id is optional (can be provided or auto-generated).}
@optional {workflow_registration_id: any # The ID of the workflow registration to schedule, workflow_version_id: any # Deprecated: use workflow_registration_id, workflow_identifier: any # The name or ID of the workflow to schedule, workflow_task_queue: any # Deprecated. Use deployment_name instead., schedule_id: any # Allows you to specify a custom schedule ID. If not provided, a random ID will be generated., deployment_name: any # Name of the deployment to route this schedule to}
@returns(201) {schedule_id: str} # Successful Response
@errors {422: Validation Error}

@endpoint DELETE /v1/workflows/schedules/{schedule_id}
@desc Unschedule Workflow
@required {schedule_id: str}
@returns(204) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/workers/whoami
@desc Get Worker Info
@returns(200) {scheduler_url: str, namespace: str, tls: bool} # Successful Response

@endpoint GET /v1/workflows/events/stream
@desc Get Stream Events
@optional {scope: str(activity/workflow/*)=*, activity_name: str=*, activity_id: str=*, workflow_name: str=*, workflow_exec_id: str=*, root_workflow_exec_id: str=*, parent_workflow_exec_id: str=*, stream: str=*, start_seq: int=0, metadata_filters: any, workflow_event_types: any, last-event-id: any}
@returns(200) Stream of Server-Sent Events (SSE)
@errors {422: Validation Error}

@endpoint GET /v1/workflows/events/list
@desc Get Workflow Events
@optional {root_workflow_exec_id: any # Execution ID of the root workflow that initiated this execution chain., workflow_exec_id: any # Execution ID of the workflow that emitted this event., workflow_run_id: any # Run ID of the workflow that emitted this event., limit: int=100 # Maximum number of events to return., cursor: any # Cursor for pagination.}
@returns(200) {events: [any], next_cursor: any} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/deployments
@desc List Deployments
@optional {active_only: bool=true, workflow_name: any}
@returns(200) {deployments: [map]} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/deployments/{name}
@desc Get Deployment
@required {name: str}
@returns(200) {id: str(uuid), name: str, is_active: bool, created_at: str(date-time), updated_at: str(date-time), workers: [map]} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/registrations
@desc Get Workflow Registrations
@optional {workflow_id: any # The workflow ID to filter by, task_queue: any # The task queue to filter by, active_only: bool=false # Whether to only return active workflows versions, include_shared: bool=true # Whether to include shared workflow versions, workflow_search: any # The workflow name to filter by, archived: any # Filter by archived state. False=exclude archived, True=only archived, None=include all, with_workflow: bool=false # Whether to include the workflow definition, available_in_chat_assistant: any # Whether to only return workflows compatible with chat assistant, limit: int=50 # The maximum number of workflows versions to return, cursor: any # The cursor for pagination}
@returns(200) {workflow_registrations: [map], next_cursor: any, workflow_versions: [map]} # Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/{workflow_identifier}/execute
@desc Execute Workflow
@required {workflow_identifier: str}
@optional {execution_id: any # Allows you to specify a custom execution ID. If not provided, a random ID will be generated., input: any # The input to the workflow. This should be a dictionary that matches the workflow's input schema., encoded_input: any # Encoded input to the workflow, used when payload encoding is enabled., wait_for_result: bool=false # If true, wait for the workflow to complete and return the result directly., timeout_seconds: any # Maximum time to wait for completion when wait_for_result is true., custom_tracing_attributes: any, task_queue: any # Deprecated. Use deployment_name instead., deployment_name: any # Name of the deployment to route this execution to}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint POST /v1/workflows/registrations/{workflow_registration_id}/execute
@desc Execute Workflow Registration
@required {workflow_registration_id: str(uuid)}
@optional {execution_id: any # Allows you to specify a custom execution ID. If not provided, a random ID will be generated., input: any # The input to the workflow. This should be a dictionary that matches the workflow's input schema., encoded_input: any # Encoded input to the workflow, used when payload encoding is enabled., wait_for_result: bool=false # If true, wait for the workflow to complete and return the result directly., timeout_seconds: any # Maximum time to wait for completion when wait_for_result is true., custom_tracing_attributes: any, task_queue: any # Deprecated. Use deployment_name instead., deployment_name: any # Name of the deployment to route this execution to}
@returns(200) Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/{workflow_identifier}
@desc Get Workflow
@required {workflow_identifier: str}
@returns(200) {workflow: map{id: str(uuid), name: str, display_name: str, type: str, description: any, customer_id: str(uuid), workspace_id: str(uuid), shared_namespace: any, available_in_chat_assistant: bool, is_technical: bool, archived: bool, active: bool}} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/workflows/{workflow_identifier}
@desc Update Workflow
@required {workflow_identifier: str}
@optional {display_name: any # New display name value, description: any # New description value, available_in_chat_assistant: any # Whether to make the workflow available in the chat assistant}
@returns(200) {workflow: map{id: str(uuid), name: str, display_name: str, type: str, description: any, customer_id: str(uuid), workspace_id: str(uuid), shared_namespace: any, available_in_chat_assistant: bool, is_technical: bool, archived: bool}} # Successful Response
@errors {422: Validation Error}

@endpoint GET /v1/workflows/registrations/{workflow_registration_id}
@desc Get Workflow Registration
@required {workflow_registration_id: str(uuid)}
@optional {with_workflow: bool=false # Whether to include the workflow definition, include_shared: bool=true # Whether to include shared workflow versions}
@returns(200) {workflow_registration: map{id: str(uuid), task_queue: str, definition: map{input_schema: map, output_schema: any, signals: [map], queries: [map], updates: [map], enforce_determinism: bool, execution_timeout: num}, workflow_id: str(uuid), workflow: any, compatible_with_chat_assistant: bool, active: bool}, workflow_version: map{id: str(uuid), task_queue: str, definition: map{input_schema: map, output_schema: any, signals: [map], queries: [map], updates: [map], enforce_determinism: bool, execution_timeout: num}, workflow_id: str(uuid), workflow: any, compatible_with_chat_assistant: bool, active: bool}} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/workflows/{workflow_identifier}/archive
@desc Archive Workflow
@required {workflow_identifier: str}
@returns(200) {workflow: map{id: str(uuid), name: str, display_name: str, type: str, description: any, customer_id: str(uuid), workspace_id: str(uuid), shared_namespace: any, available_in_chat_assistant: bool, is_technical: bool, archived: bool}} # Successful Response
@errors {422: Validation Error}

@endpoint PUT /v1/workflows/{workflow_identifier}/unarchive
@desc Unarchive Workflow
@required {workflow_identifier: str}
@returns(200) {workflow: map{id: str(uuid), name: str, display_name: str, type: str, description: any, customer_id: str(uuid), workspace_id: str(uuid), shared_namespace: any, available_in_chat_assistant: bool, is_technical: bool, archived: bool}} # Successful Response
@errors {422: Validation Error}

@endgroup

@end
