List LLM Models
client.models.list(ModelListParams { provider_category, provider_name, provider_type } query?, RequestOptions options?): ModelListResponse { context_window, max_context_window, model, 21 more }
/v1/models/
List available LLM models using the asynchronous implementation for improved performance.
Returns Model format which extends LLMConfig with additional metadata fields. Legacy LLMConfig fields are marked as deprecated but still available for backward compatibility.
Parameters
Returns
List LLM Models
// Example: list the LLM models available to this client and log the result.
import Letta from '@letta-ai/letta-client';

const client = new Letta({ apiKey: 'My API Key' });

const availableModels = await client.models.list();
console.log(availableModels);
[
{
"context_window": 0,
"max_context_window": 0,
"model": "model",
"model_endpoint_type": "openai",
"name": "name",
"provider_type": "anthropic",
"compatibility_type": "gguf",
"display_name": "display_name",
"enable_reasoner": true,
"frequency_penalty": 0,
"handle": "handle",
"max_reasoning_tokens": 0,
"max_tokens": 0,
"model_endpoint": "model_endpoint",
"model_type": "llm",
"model_wrapper": "model_wrapper",
"parallel_tool_calls": true,
"provider_category": "base",
"provider_name": "provider_name",
"put_inner_thoughts_in_kwargs": true,
"reasoning_effort": "minimal",
"temperature": 0,
"tier": "tier",
"verbosity": "low"
}
]
Returns Examples
[
{
"context_window": 0,
"max_context_window": 0,
"model": "model",
"model_endpoint_type": "openai",
"name": "name",
"provider_type": "anthropic",
"compatibility_type": "gguf",
"display_name": "display_name",
"enable_reasoner": true,
"frequency_penalty": 0,
"handle": "handle",
"max_reasoning_tokens": 0,
"max_tokens": 0,
"model_endpoint": "model_endpoint",
"model_type": "llm",
"model_wrapper": "model_wrapper",
"parallel_tool_calls": true,
"provider_category": "base",
"provider_name": "provider_name",
"put_inner_thoughts_in_kwargs": true,
"reasoning_effort": "minimal",
"temperature": 0,
"tier": "tier",
"verbosity": "low"
}
]