@@ -297,6 +297,8 @@ pub struct LanguageModel {
pub supports_tools: bool,
pub supports_images: bool,
pub supports_thinking: bool,
+ #[serde(default)]
+ pub supported_effort_levels: Vec<SupportedEffortLevel>,
#[serde(default)]
pub supports_streaming_tools: bool,
/// Only used by OpenAI and xAI.
@@ -304,6 +306,12 @@ pub struct LanguageModel {
pub supports_parallel_tool_calls: bool,
}
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SupportedEffortLevel {
+ pub name: Arc<str>,
+ pub value: Arc<str>,
+}
+
#[derive(Debug, Serialize, Deserialize)]
pub struct ListModelsResponse {
pub models: Vec<LanguageModel>,
@@ -573,6 +573,12 @@ impl Default for LanguageModelTextStream {
}
}
+#[derive(Debug, Clone)]
+pub struct LanguageModelEffortLevel {
+ pub name: SharedString,
+ pub value: SharedString,
+}
+
pub trait LanguageModel: Send + Sync {
fn id(&self) -> LanguageModelId;
fn name(&self) -> LanguageModelName;
@@ -591,11 +597,16 @@ pub trait LanguageModel: Send + Sync {
None
}
- /// Whether this model supports extended thinking.
+ /// Whether this model supports thinking.
fn supports_thinking(&self) -> bool {
false
}
+ /// Returns the list of supported effort levels that can be used when thinking.
+ fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
+ Vec::new()
+ }
+
/// Whether this model supports images
fn supports_images(&self) -> bool;
@@ -19,11 +19,11 @@ use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
- LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
- LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
- LanguageModelProviderState, LanguageModelRequest, LanguageModelToolChoice,
- LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh, PaymentRequiredError,
- RateLimiter, RefreshLlmTokenListener,
+ LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
+ LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
+ LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
+ LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
+ PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
@@ -577,6 +577,17 @@ impl LanguageModel for CloudLanguageModel {
self.model.supports_thinking
}
+ fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
+ self.model
+ .supported_effort_levels
+ .iter()
+ .map(|effort_level| LanguageModelEffortLevel {
+ name: effort_level.name.clone().into(),
+ value: effort_level.value.clone().into(),
+ })
+ .collect()
+ }
+
fn supports_streaming_tools(&self) -> bool {
self.model.supports_streaming_tools
}