From a2ca07514ccb817f99b6cd3d74ee285d58c7eae9 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Thu, 5 Feb 2026 17:20:08 -0500 Subject: [PATCH] language_model: Add `supported_effort_levels` method to `LanguageModel` (#48523) This PR adds a new `supported_effort_levels` method to the `LanguageModel` trait. This can be used to retrieve the list of effort levels that the model supports, which will eventually be used to drive the UI for selecting the thinking effort. Right now this list will only be populated for Cloud models. Release Notes: - N/A --- .../cloud_llm_client/src/cloud_llm_client.rs | 7 +++++++ crates/language_model/src/language_model.rs | 13 +++++++++++- crates/language_models/src/provider/cloud.rs | 21 ++++++++++++++----- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs index 121558c379abdc164ae4a4a6e57f0aa2b50fa691..882239716676857cfbf65ea9920530591655d819 100644 --- a/crates/cloud_llm_client/src/cloud_llm_client.rs +++ b/crates/cloud_llm_client/src/cloud_llm_client.rs @@ -297,6 +297,7 @@ pub struct LanguageModel { pub supports_tools: bool, pub supports_images: bool, pub supports_thinking: bool, + pub supported_effort_levels: Vec<SupportedEffortLevel>, #[serde(default)] pub supports_streaming_tools: bool, /// Only used by OpenAI and xAI. 
@@ -304,6 +305,12 @@ pub struct LanguageModel { pub supports_parallel_tool_calls: bool, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SupportedEffortLevel { + pub name: Arc<str>, + pub value: Arc<str>, +} + #[derive(Debug, Serialize, Deserialize)] pub struct ListModelsResponse { pub models: Vec<LanguageModel>, } diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs index eac4db09d4c6dd96901cadcdde9bbe881e95d50f..49b4ee83828761629ad6c2531449d55c874d6f18 100644 --- a/crates/language_model/src/language_model.rs +++ b/crates/language_model/src/language_model.rs @@ -573,6 +573,12 @@ impl Default for LanguageModelTextStream { } } +#[derive(Debug, Clone)] +pub struct LanguageModelEffortLevel { + pub name: SharedString, + pub value: SharedString, +} + pub trait LanguageModel: Send + Sync { fn id(&self) -> LanguageModelId; fn name(&self) -> LanguageModelName; @@ -591,11 +597,16 @@ pub trait LanguageModel: Send + Sync { None } - /// Whether this model supports extended thinking. + /// Whether this model supports thinking. fn supports_thinking(&self) -> bool { false } + /// Returns the list of supported effort levels that can be used when thinking. 
+ fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> { + Vec::new() + } + /// Whether this model supports images fn supports_images(&self) -> bool; diff --git a/crates/language_models/src/provider/cloud.rs b/crates/language_models/src/provider/cloud.rs index 8f627573e5abf1ef75a83e4719ddd253bd831e29..869c6e765bfd771de463c0182d436aeccd87e56e 100644 --- a/crates/language_models/src/provider/cloud.rs +++ b/crates/language_models/src/provider/cloud.rs @@ -19,11 +19,11 @@ use http_client::http::{HeaderMap, HeaderValue}; use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode}; use language_model::{ AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration, - LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, - LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName, - LanguageModelProviderState, LanguageModelRequest, LanguageModelToolChoice, - LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh, PaymentRequiredError, - RateLimiter, RefreshLlmTokenListener, + LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel, + LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId, + LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest, + LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh, + PaymentRequiredError, RateLimiter, RefreshLlmTokenListener, }; use release_channel::AppVersion; use schemars::JsonSchema; @@ -577,6 +577,17 @@ impl LanguageModel for CloudLanguageModel { self.model.supports_thinking } + fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> { + self.model + .supported_effort_levels + .iter() + .map(|effort_level| LanguageModelEffortLevel { + name: effort_level.name.clone().into(), + value: effort_level.value.clone().into(), + }) + .collect() + } + fn supports_streaming_tools(&self) -> bool { self.model.supports_streaming_tools }