@lap v0.3
# Machine-readable API spec. Each @endpoint block is one API call.
@api DeepSeek Chat Completion API
@base https://api.deepseek.com
@version 1.0.0
@endpoints 1
@toc chat(1)

@endpoint POST /chat/completions
@desc Create Chat Completion
@required {messages: [map{content!: str, role!: str}], model: str(deepseek-chat/deepseek-reasoner) # ID of the model to use.}
@optional {frequency_penalty: num=0 # Positive values penalize new tokens based on their frequency in the text, reducing repetition., max_tokens: int=4096 # The maximum number of tokens to generate., presence_penalty: num=0 # Positive values penalize new tokens that appear in the text, encouraging discussion of new topics., response_format: map{type: str} # Format of the response., stop: map # Sequence where the model stops generating tokens., stream: bool # Whether to stream responses as they are generated., stream_options: map # Options for streaming responses., temperature: num=1 # Controls randomness in generation (higher values = more random)., top_p: num=1 # Nucleus sampling parameter. Tokens are selected from the top_p probability mass., tools: [map] # Tools available for the model to use., tool_choice: map # Configuration for tool selection., logprobs: bool # Whether to return log probabilities of output tokens., top_logprobs: int # Number of most likely tokens to return with log probabilities. Requires logprobs to be true.}
@returns(200) {id: str, choices: [map], created: int(int64), model: str, system_fingerprint: str, object: str, usage: map} # Successful response.

@end
