LanguageModels

Created by Conrad Irwin

Change summary

Cargo.lock                                                |   6 
crates/anthropic/Cargo.toml                               |   1 
crates/anthropic/src/anthropic.rs                         |  19 
crates/google_ai/Cargo.toml                               |   1 
crates/google_ai/src/google_ai.rs                         |  11 
crates/language_model/Cargo.toml                          |   1 
crates/language_model/src/language_model.rs               |   9 
crates/language_models/src/provider/anthropic.rs          |  53 -
crates/language_models/src/provider/bedrock.rs            |  19 
crates/language_models/src/provider/cloud.rs              |  38 
crates/language_models/src/provider/deepseek.rs           |  12 
crates/language_models/src/provider/google.rs             |  27 
crates/language_models/src/provider/lmstudio.rs           |  12 
crates/language_models/src/provider/mistral.rs            |  15 
crates/language_models/src/provider/ollama.rs             |  21 
crates/language_models/src/provider/open_ai.rs            |  13 
crates/language_models/src/provider/open_ai_compatible.rs |  34 
crates/language_models/src/provider/open_router.rs        |  54 -
crates/language_models/src/provider/vercel.rs             |  12 
crates/language_models/src/provider/x_ai.rs               |  12 
crates/language_models/src/settings.rs                    | 459 +++-----
crates/ollama/Cargo.toml                                  |   1 
crates/ollama/src/ollama.rs                               |  24 
crates/open_ai/Cargo.toml                                 |   1 
crates/open_ai/src/open_ai.rs                             |  11 
crates/open_router/Cargo.toml                             |   3 
crates/open_router/src/open_router.rs                     |  50 
crates/settings/src/settings_content.rs                   |   5 
crates/settings/src/settings_content/language_model.rs    | 393 +++++++
29 files changed, 646 insertions(+), 671 deletions(-)
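
The diffs below all apply the same pattern: the serde/JsonSchema "content" types for language-model settings (the various `AvailableModel` structs, `ModelMode`, `KeepAlive`, `ReasoningEffort`, and so on) move into a new `crates/settings/src/settings_content/language_model.rs`, and each provider crate re-exports them under its old local name. A minimal sketch of the resulting shape, using the Anthropic provider and only names that appear in the diffs (derives and remaining fields elided):

    // Provider crates stop defining their own settings structs and instead
    // re-export the shared ones from the `settings` crate.
    pub use settings::AnthropicAvailableModel as AvailableModel;

    // The provider-facing settings struct keeps its shape; only the source of
    // the `AvailableModel` type changes.
    pub struct AnthropicSettings {
        pub api_url: String,
        pub available_models: Vec<AvailableModel>,
    }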

Detailed changes

Cargo.lock 🔗

@@ -658,6 +658,7 @@ dependencies = [
  "schemars",
  "serde",
  "serde_json",
+ "settings",
  "strum 0.27.1",
  "thiserror 2.0.12",
  "workspace-hack",
@@ -7368,6 +7369,7 @@ dependencies = [
  "schemars",
  "serde",
  "serde_json",
+ "settings",
  "strum 0.27.1",
  "workspace-hack",
 ]
@@ -9153,6 +9155,7 @@ dependencies = [
  "schemars",
  "serde",
  "serde_json",
+ "settings",
  "smol",
  "telemetry_events",
  "thiserror 2.0.12",
@@ -11134,6 +11137,7 @@ dependencies = [
  "schemars",
  "serde",
  "serde_json",
+ "settings",
  "workspace-hack",
 ]
 
@@ -11242,6 +11246,7 @@ dependencies = [
  "schemars",
  "serde",
  "serde_json",
+ "settings",
  "strum 0.27.1",
  "workspace-hack",
 ]
@@ -11256,6 +11261,7 @@ dependencies = [
  "schemars",
  "serde",
  "serde_json",
+ "settings",
  "strum 0.27.1",
  "thiserror 2.0.12",
  "util",

crates/anthropic/Cargo.toml 🔗

@@ -23,6 +23,7 @@ http_client.workspace = true
 schemars = { workspace = true, optional = true }
 serde.workspace = true
 serde_json.workspace = true
+settings.workspace = true
 strum.workspace = true
 thiserror.workspace = true
 workspace-hack.workspace = true

crates/anthropic/src/anthropic.rs 🔗

@@ -8,6 +8,7 @@ use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::B
 use http_client::http::{self, HeaderMap, HeaderValue};
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, StatusCode};
 use serde::{Deserialize, Serialize};
+pub use settings::{AnthropicAvailableModel as AvailableModel, ModelMode};
 use strum::{EnumIter, EnumString};
 use thiserror::Error;
 
@@ -31,6 +32,24 @@ pub enum AnthropicModelMode {
     },
 }
 
+impl From<ModelMode> for AnthropicModelMode {
+    fn from(value: ModelMode) -> Self {
+        match value {
+            ModelMode::Default => AnthropicModelMode::Default,
+            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
+        }
+    }
+}
+
+impl From<AnthropicModelMode> for ModelMode {
+    fn from(value: AnthropicModelMode) -> Self {
+        match value {
+            AnthropicModelMode::Default => ModelMode::Default,
+            AnthropicModelMode::Thinking { budget_tokens } => ModelMode::Thinking { budget_tokens },
+        }
+    }
+}
+
 #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, EnumIter)]
 pub enum Model {
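
A hedged usage sketch (the function below is illustrative, not part of this diff) of why these conversions now live in the anthropic crate: configured models carry the provider-agnostic `settings::ModelMode`, and the client converts it where the request is built. The `unwrap_or_default()` relies on `ModelMode` having a `Default` variant, which the `.unwrap_or_default()` call in the open_router provider diff below also assumes.

    use settings::ModelMode;

    // `AnthropicModelMode` is the enum defined above in crates/anthropic/src/anthropic.rs.
    fn to_anthropic_mode(mode: Option<ModelMode>) -> AnthropicModelMode {
        // Missing mode falls back to ModelMode::Default, then converts via the
        // From impl added in this diff.
        mode.unwrap_or_default().into()
    }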

crates/google_ai/Cargo.toml 🔗

@@ -21,5 +21,6 @@ http_client.workspace = true
 schemars = { workspace = true, optional = true }
 serde.workspace = true
 serde_json.workspace = true
+settings.workspace = true
 strum.workspace = true
 workspace-hack.workspace = true

crates/google_ai/src/google_ai.rs 🔗

@@ -4,6 +4,7 @@ use anyhow::{Result, anyhow, bail};
 use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
+pub use settings::ModelMode as GoogleModelMode;
 
 pub const API_URL: &str = "https://generativelanguage.googleapis.com";
 
@@ -295,16 +296,6 @@ pub struct ThinkingConfig {
     pub thinking_budget: u32,
 }
 
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
-#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
-pub enum GoogleModelMode {
-    #[default]
-    Default,
-    Thinking {
-        budget_tokens: Option<u32>,
-    },
-}
-
 #[derive(Debug, Deserialize, Serialize)]
 #[serde(rename_all = "camelCase")]
 pub struct GenerationConfig {

crates/language_model/Cargo.toml 🔗

@@ -35,6 +35,7 @@ proto.workspace = true
 schemars.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+settings.workspace = true
 smol.workspace = true
 telemetry_events.workspace = true
 thiserror.workspace = true

crates/language_model/src/language_model.rs 🔗

@@ -21,6 +21,7 @@ use open_router::OpenRouterError;
 use parking_lot::Mutex;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize, de::DeserializeOwned};
+pub use settings::LanguageModelCacheConfiguration;
 use std::ops::{Add, Sub};
 use std::str::FromStr;
 use std::sync::Arc;
@@ -62,14 +63,6 @@ pub fn init_settings(cx: &mut App) {
     registry::init(cx);
 }
 
-/// Configuration for caching language model messages.
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct LanguageModelCacheConfiguration {
-    pub max_cache_anchors: usize,
-    pub should_speculate: bool,
-    pub min_total_token: u64,
-}
-
 /// A completion event from a language model.
 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
 pub enum LanguageModelCompletionEvent {

crates/language_models/src/provider/anthropic.rs 🔗

@@ -22,8 +22,6 @@ use language_model::{
     LanguageModelToolResultContent, MessageContent, RateLimiter, Role,
 };
 use language_model::{LanguageModelCompletionEvent, LanguageModelToolUse, StopReason};
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;
 use std::str::FromStr;
@@ -33,6 +31,8 @@ use theme::ThemeSettings;
 use ui::{Icon, IconName, List, Tooltip, prelude::*};
 use util::ResultExt;
 
+pub use settings::AnthropicAvailableModel as AvailableModel;
+
 const PROVIDER_ID: LanguageModelProviderId = language_model::ANTHROPIC_PROVIDER_ID;
 const PROVIDER_NAME: LanguageModelProviderName = language_model::ANTHROPIC_PROVIDER_NAME;
 
@@ -43,55 +43,6 @@ pub struct AnthropicSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    /// The model's name in the Anthropic API. e.g. claude-3-5-sonnet-latest, claude-3-opus-20240229, etc
-    pub name: String,
-    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
-    pub display_name: Option<String>,
-    /// The model's context window size.
-    pub max_tokens: u64,
-    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
-    pub tool_override: Option<String>,
-    /// Configuration of Anthropic's caching API.
-    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    pub max_output_tokens: Option<u64>,
-    pub default_temperature: Option<f32>,
-    #[serde(default)]
-    pub extra_beta_headers: Vec<String>,
-    /// The model's mode (e.g. thinking)
-    pub mode: Option<ModelMode>,
-}
-
-#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, JsonSchema)]
-#[serde(tag = "type", rename_all = "lowercase")]
-pub enum ModelMode {
-    #[default]
-    Default,
-    Thinking {
-        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
-        budget_tokens: Option<u32>,
-    },
-}
-
-impl From<ModelMode> for AnthropicModelMode {
-    fn from(value: ModelMode) -> Self {
-        match value {
-            ModelMode::Default => AnthropicModelMode::Default,
-            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
-        }
-    }
-}
-
-impl From<AnthropicModelMode> for ModelMode {
-    fn from(value: AnthropicModelMode) -> Self {
-        match value {
-            AnthropicModelMode::Default => ModelMode::Default,
-            AnthropicModelMode::Thinking { budget_tokens } => ModelMode::Thinking { budget_tokens },
-        }
-    }
-}
-
 pub struct AnthropicLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/bedrock.rs 🔗

@@ -42,7 +42,7 @@ use language_model::{
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
-use settings::{Settings, SettingsStore};
+use settings::{BedrockAvailableModel as AvailableModel, Settings, SettingsStore};
 use smol::lock::OnceCell;
 use strum::{EnumIter, IntoEnumIterator, IntoStaticStr};
 use theme::ThemeSettings;
@@ -83,15 +83,14 @@ pub enum BedrockAuthMethod {
     Automatic,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    pub max_output_tokens: Option<u64>,
-    pub default_temperature: Option<f32>,
-    pub mode: Option<ModelMode>,
+impl From<settings::BedrockAuthMethodContent> for BedrockAuthMethod {
+    fn from(value: settings::BedrockAuthMethodContent) -> Self {
+        match value {
+            settings::BedrockAuthMethodContent::SingleSignOn => BedrockAuthMethod::SingleSignOn,
+            settings::BedrockAuthMethodContent::Automatic => BedrockAuthMethod::Automatic,
+            settings::BedrockAuthMethodContent::NamedProfile => BedrockAuthMethod::NamedProfile,
+        }
+    }
 }
 
 #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, JsonSchema)]

crates/language_models/src/provider/cloud.rs 🔗

@@ -32,6 +32,8 @@ use release_channel::AppVersion;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize, de::DeserializeOwned};
 use settings::SettingsStore;
+pub use settings::ZedDotDevAvailableModel as AvailableModel;
+pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
 use smol::io::{AsyncReadExt, BufReader};
 use std::pin::Pin;
 use std::str::FromStr as _;
@@ -52,42 +54,6 @@ const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVI
 pub struct ZedDotDevSettings {
     pub available_models: Vec<AvailableModel>,
 }
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-#[serde(rename_all = "lowercase")]
-pub enum AvailableProvider {
-    Anthropic,
-    OpenAi,
-    Google,
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    /// The provider of the language model.
-    pub provider: AvailableProvider,
-    /// The model's name in the provider's API. e.g. claude-3-5-sonnet-20240620
-    pub name: String,
-    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
-    pub display_name: Option<String>,
-    /// The size of the context window, indicating the maximum number of tokens the model can process.
-    pub max_tokens: usize,
-    /// The maximum number of output tokens allowed by the model.
-    pub max_output_tokens: Option<u64>,
-    /// The maximum number of completion tokens allowed by the model (o1-* only)
-    pub max_completion_tokens: Option<u64>,
-    /// Override this model with a different Anthropic model for tool calls.
-    pub tool_override: Option<String>,
-    /// Indicates whether this custom model supports caching.
-    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    /// The default temperature to use for this model.
-    pub default_temperature: Option<f32>,
-    /// Any extra beta headers to provide when using the model.
-    #[serde(default)]
-    pub extra_beta_headers: Vec<String>,
-    /// The model's mode (e.g. thinking)
-    pub mode: Option<ModelMode>,
-}
-
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
 #[serde(tag = "type", rename_all = "lowercase")]
 pub enum ModelMode {

crates/language_models/src/provider/deepseek.rs 🔗

@@ -16,8 +16,7 @@ use language_model::{
     LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
     RateLimiter, Role, StopReason, TokenUsage,
 };
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
+pub use settings::DeepseekAvailableModel as AvailableModel;
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;
 use std::str::FromStr;
@@ -44,15 +43,6 @@ pub struct DeepSeekSettings {
     pub api_url: String,
     pub available_models: Vec<AvailableModel>,
 }
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-}
-
 pub struct DeepSeekLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: Entity<State>,

crates/language_models/src/provider/google.rs 🔗

@@ -23,6 +23,7 @@ use language_model::{
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
+pub use settings::GoogleAvailableModel as AvailableModel;
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;
 use std::sync::{
@@ -59,32 +60,6 @@ pub enum ModelMode {
     },
 }
 
-impl From<ModelMode> for GoogleModelMode {
-    fn from(value: ModelMode) -> Self {
-        match value {
-            ModelMode::Default => GoogleModelMode::Default,
-            ModelMode::Thinking { budget_tokens } => GoogleModelMode::Thinking { budget_tokens },
-        }
-    }
-}
-
-impl From<GoogleModelMode> for ModelMode {
-    fn from(value: GoogleModelMode) -> Self {
-        match value {
-            GoogleModelMode::Default => ModelMode::Default,
-            GoogleModelMode::Thinking { budget_tokens } => ModelMode::Thinking { budget_tokens },
-        }
-    }
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    name: String,
-    display_name: Option<String>,
-    max_tokens: u64,
-    mode: Option<ModelMode>,
-}
-
 pub struct GoogleLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/lmstudio.rs 🔗

@@ -15,8 +15,7 @@ use language_model::{
     LanguageModelRequest, RateLimiter, Role,
 };
 use lmstudio::{ModelType, get_models};
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
+pub use settings::LmStudioAvailableModel as AvailableModel;
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;
 use std::str::FromStr;
@@ -40,15 +39,6 @@ pub struct LmStudioSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub supports_tool_calls: bool,
-    pub supports_images: bool,
-}
-
 pub struct LmStudioLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/mistral.rs 🔗

@@ -15,8 +15,7 @@ use language_model::{
     RateLimiter, Role, StopReason, TokenUsage,
 };
 use mistral::StreamResponse;
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
+pub use settings::MistralAvailableModel as AvailableModel;
 use settings::{Settings, SettingsStore};
 use std::collections::HashMap;
 use std::pin::Pin;
@@ -38,18 +37,6 @@ pub struct MistralSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-    pub max_completion_tokens: Option<u64>,
-    pub supports_tools: Option<bool>,
-    pub supports_images: Option<bool>,
-    pub supports_thinking: Option<bool>,
-}
-
 pub struct MistralLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/ollama.rs 🔗

@@ -14,8 +14,7 @@ use ollama::{
     ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, KeepAlive, OllamaFunctionCall,
     OllamaFunctionTool, OllamaToolCall, get_models, show_model, stream_chat_completion,
 };
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
+pub use settings::OllamaAvailableModel as AvailableModel;
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;
 use std::sync::atomic::{AtomicU64, Ordering};
@@ -39,24 +38,6 @@ pub struct OllamaSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    /// The model name in the Ollama API (e.g. "llama3.2:latest")
-    pub name: String,
-    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
-    pub display_name: Option<String>,
-    /// The Context Length parameter to the model (aka num_ctx or n_ctx)
-    pub max_tokens: u64,
-    /// The number of seconds to keep the connection open after the last request
-    pub keep_alive: Option<KeepAlive>,
-    /// Whether the model supports tools
-    pub supports_tools: Option<bool>,
-    /// Whether the model supports vision
-    pub supports_images: Option<bool>,
-    /// Whether to enable think mode
-    pub supports_thinking: Option<bool>,
-}
-
 pub struct OllamaLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/open_ai.rs 🔗

@@ -15,8 +15,7 @@ use language_model::{
 };
 use menu;
 use open_ai::{ImageUrl, Model, ReasoningEffort, ResponseStreamEvent, stream_completion};
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
+use settings::OpenAiAvailableModel as AvailableModel;
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;
 use std::str::FromStr as _;
@@ -38,16 +37,6 @@ pub struct OpenAiSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-    pub max_completion_tokens: Option<u64>,
-    pub reasoning_effort: Option<ReasoningEffort>,
-}
-
 pub struct OpenAiLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/open_ai_compatible.rs 🔗

@@ -13,8 +13,6 @@ use language_model::{
 };
 use menu;
 use open_ai::{ResponseStreamEvent, stream_completion};
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
 use std::sync::Arc;
 
@@ -24,6 +22,8 @@ use util::ResultExt;
 
 use crate::AllLanguageModelSettings;
 use crate::provider::open_ai::{OpenAiEventMapper, into_open_ai};
+pub use settings::OpenAiCompatibleAvailableModel as AvailableModel;
+pub use settings::OpenAiCompatibleModelCapabilities as ModelCapabilities;
 
 #[derive(Default, Clone, Debug, PartialEq)]
 pub struct OpenAiCompatibleSettings {
@@ -31,36 +31,6 @@ pub struct OpenAiCompatibleSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-    pub max_completion_tokens: Option<u64>,
-    #[serde(default)]
-    pub capabilities: ModelCapabilities,
-}
-
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct ModelCapabilities {
-    pub tools: bool,
-    pub images: bool,
-    pub parallel_tool_calls: bool,
-    pub prompt_cache_key: bool,
-}
-
-impl Default for ModelCapabilities {
-    fn default() -> Self {
-        Self {
-            tools: true,
-            images: false,
-            parallel_tool_calls: false,
-            prompt_cache_key: false,
-        }
-    }
-}
-
 pub struct OpenAiCompatibleLanguageModelProvider {
     id: LanguageModelProviderId,
     name: LanguageModelProviderName,

crates/language_models/src/provider/open_router.rs 🔗

@@ -15,12 +15,9 @@ use language_model::{
     LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
 };
 use open_router::{
-    Model, ModelMode as OpenRouterModelMode, Provider, ResponseStreamEvent, list_models,
-    stream_completion,
+    Model, ModelMode as OpenRouterModelMode, ResponseStreamEvent, list_models, stream_completion,
 };
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
-use settings::{Settings, SettingsStore};
+use settings::{OpenRouterAvailableModel as AvailableModel, Settings, SettingsStore};
 use std::pin::Pin;
 use std::str::FromStr as _;
 use std::sync::Arc;
@@ -39,51 +36,6 @@ pub struct OpenRouterSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-    pub max_completion_tokens: Option<u64>,
-    pub supports_tools: Option<bool>,
-    pub supports_images: Option<bool>,
-    pub mode: Option<ModelMode>,
-    pub provider: Option<Provider>,
-}
-
-#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, JsonSchema)]
-#[serde(tag = "type", rename_all = "lowercase")]
-pub enum ModelMode {
-    #[default]
-    Default,
-    Thinking {
-        budget_tokens: Option<u32>,
-    },
-}
-
-impl From<ModelMode> for OpenRouterModelMode {
-    fn from(value: ModelMode) -> Self {
-        match value {
-            ModelMode::Default => OpenRouterModelMode::Default,
-            ModelMode::Thinking { budget_tokens } => {
-                OpenRouterModelMode::Thinking { budget_tokens }
-            }
-        }
-    }
-}
-
-impl From<OpenRouterModelMode> for ModelMode {
-    fn from(value: OpenRouterModelMode) -> Self {
-        match value {
-            OpenRouterModelMode::Default => ModelMode::Default,
-            OpenRouterModelMode::Thinking { budget_tokens } => {
-                ModelMode::Thinking { budget_tokens }
-            }
-        }
-    }
-}
-
 pub struct OpenRouterLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,
@@ -297,7 +249,7 @@ impl LanguageModelProvider for OpenRouterLanguageModelProvider {
                 max_tokens: model.max_tokens,
                 supports_tools: model.supports_tools,
                 supports_images: model.supports_images,
-                mode: model.mode.clone().unwrap_or_default().into(),
+                mode: model.mode.clone().unwrap_or_default(),
                 provider: model.provider.clone(),
             });
         }

crates/language_models/src/provider/vercel.rs 🔗

@@ -12,13 +12,12 @@ use language_model::{
 };
 use menu;
 use open_ai::ResponseStreamEvent;
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
 use std::sync::Arc;
 use strum::IntoEnumIterator;
 use vercel::Model;
 
+pub use settings::VercelAvailableModel as AvailableModel;
 use ui::{ElevationIndex, List, Tooltip, prelude::*};
 use ui_input::SingleLineInput;
 use util::ResultExt;
@@ -34,15 +33,6 @@ pub struct VercelSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-    pub max_completion_tokens: Option<u64>,
-}
-
 pub struct VercelLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/provider/x_ai.rs 🔗

@@ -12,13 +12,12 @@ use language_model::{
 };
 use menu;
 use open_ai::ResponseStreamEvent;
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
 use std::sync::Arc;
 use strum::IntoEnumIterator;
 use x_ai::Model;
 
+pub use settings::XaiAvailableModel as AvailableModel;
 use ui::{ElevationIndex, List, Tooltip, prelude::*};
 use ui_input::SingleLineInput;
 use util::ResultExt;
@@ -34,15 +33,6 @@ pub struct XAiSettings {
     pub available_models: Vec<AvailableModel>,
 }
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
-pub struct AvailableModel {
-    pub name: String,
-    pub display_name: Option<String>,
-    pub max_tokens: u64,
-    pub max_output_tokens: Option<u64>,
-    pub max_completion_tokens: Option<u64>,
-}
-
 pub struct XAiLanguageModelProvider {
     http_client: Arc<dyn HttpClient>,
     state: gpui::Entity<State>,

crates/language_models/src/settings.rs 🔗

@@ -1,27 +1,16 @@
 use std::sync::Arc;
 
-use anyhow::Result;
 use collections::HashMap;
 use gpui::App;
-use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
-use settings::{Settings, SettingsKey, SettingsSources, SettingsUi};
+use settings::Settings;
+use util::MergeFrom;
 
 use crate::provider::{
-    self,
-    anthropic::AnthropicSettings,
-    bedrock::AmazonBedrockSettings,
-    cloud::{self, ZedDotDevSettings},
-    deepseek::DeepSeekSettings,
-    google::GoogleSettings,
-    lmstudio::LmStudioSettings,
-    mistral::MistralSettings,
-    ollama::OllamaSettings,
-    open_ai::OpenAiSettings,
-    open_ai_compatible::OpenAiCompatibleSettings,
-    open_router::OpenRouterSettings,
-    vercel::VercelSettings,
-    x_ai::XAiSettings,
+    anthropic::AnthropicSettings, bedrock::AmazonBedrockSettings, cloud::ZedDotDevSettings,
+    deepseek::DeepSeekSettings, google::GoogleSettings, lmstudio::LmStudioSettings,
+    mistral::MistralSettings, ollama::OllamaSettings, open_ai::OpenAiSettings,
+    open_ai_compatible::OpenAiCompatibleSettings, open_router::OpenRouterSettings,
+    vercel::VercelSettings, x_ai::XAiSettings,
 };
 
 /// Initializes the language model settings.
@@ -46,281 +35,197 @@ pub struct AllLanguageModelSettings {
     pub zed_dot_dev: ZedDotDevSettings,
 }
 
-#[derive(
-    Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, SettingsUi, SettingsKey,
-)]
-#[settings_key(key = "language_models")]
-pub struct AllLanguageModelSettingsContent {
-    pub anthropic: Option<AnthropicSettingsContent>,
-    pub bedrock: Option<AmazonBedrockSettingsContent>,
-    pub deepseek: Option<DeepseekSettingsContent>,
-    pub google: Option<GoogleSettingsContent>,
-    pub lmstudio: Option<LmStudioSettingsContent>,
-    pub mistral: Option<MistralSettingsContent>,
-    pub ollama: Option<OllamaSettingsContent>,
-    pub open_router: Option<OpenRouterSettingsContent>,
-    pub openai: Option<OpenAiSettingsContent>,
-    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
-    pub vercel: Option<VercelSettingsContent>,
-    pub x_ai: Option<XAiSettingsContent>,
-    #[serde(rename = "zed.dev")]
-    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct AnthropicSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::anthropic::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct AmazonBedrockSettingsContent {
-    available_models: Option<Vec<provider::bedrock::AvailableModel>>,
-    endpoint_url: Option<String>,
-    region: Option<String>,
-    profile: Option<String>,
-    authentication_method: Option<provider::bedrock::BedrockAuthMethod>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct OllamaSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::ollama::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct LmStudioSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::lmstudio::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct DeepseekSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::deepseek::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct MistralSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::mistral::AvailableModel>>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct OpenAiSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::open_ai::AvailableModel>>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct OpenAiCompatibleSettingsContent {
-    pub api_url: String,
-    pub available_models: Vec<provider::open_ai_compatible::AvailableModel>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct VercelSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::vercel::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct GoogleSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::google::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct XAiSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::x_ai::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct ZedDotDevSettingsContent {
-    available_models: Option<Vec<cloud::AvailableModel>>,
-}
-
-#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
-pub struct OpenRouterSettingsContent {
-    pub api_url: Option<String>,
-    pub available_models: Option<Vec<provider::open_router::AvailableModel>>,
-}
-
 impl settings::Settings for AllLanguageModelSettings {
     const PRESERVED_KEYS: Option<&'static [&'static str]> = Some(&["version"]);
 
-    type FileContent = AllLanguageModelSettingsContent;
-
-    fn load(sources: SettingsSources<Self::FileContent>, _: &mut App) -> Result<Self> {
-        fn merge<T>(target: &mut T, value: Option<T>) {
-            if let Some(value) = value {
-                *target = value;
-            }
+    fn from_defaults(content: &settings::SettingsContent, _cx: &mut App) -> Self {
+        let language_models = content.language_models.clone().unwrap();
+        let anthropic = language_models.anthropic.unwrap();
+        let bedrock = language_models.bedrock.unwrap();
+        let deepseek = language_models.deepseek.unwrap();
+        let google = language_models.google.unwrap();
+        let lmstudio = language_models.lmstudio.unwrap();
+        let mistral = language_models.mistral.unwrap();
+        let ollama = language_models.ollama.unwrap();
+        let open_router = language_models.open_router.unwrap();
+        let openai = language_models.openai.unwrap();
+        let openai_compatible = language_models.openai_compatible.unwrap();
+        let vercel = language_models.vercel.unwrap();
+        let x_ai = language_models.x_ai.unwrap();
+        let zed_dot_dev = language_models.zed_dot_dev.unwrap();
+        Self {
+            anthropic: AnthropicSettings {
+                api_url: anthropic.api_url.unwrap(),
+                available_models: anthropic.available_models.unwrap_or_default(),
+            },
+            bedrock: AmazonBedrockSettings {
+                available_models: bedrock.available_models.unwrap_or_default(),
+                region: bedrock.region,
+                endpoint: bedrock.endpoint_url, // todo(should be api_url)
+                profile_name: bedrock.profile,
+                role_arn: None, // todo(was never a setting for this...)
+                authentication_method: bedrock.authentication_method.map(Into::into),
+            },
+            deepseek: DeepSeekSettings {
+                api_url: deepseek.api_url.unwrap(),
+                available_models: deepseek.available_models.unwrap_or_default(),
+            },
+            google: GoogleSettings {
+                api_url: google.api_url.unwrap(),
+                available_models: google.available_models.unwrap_or_default(),
+            },
+            lmstudio: LmStudioSettings {
+                api_url: lmstudio.api_url.unwrap(),
+                available_models: lmstudio.available_models.unwrap_or_default(),
+            },
+            mistral: MistralSettings {
+                api_url: mistral.api_url.unwrap(),
+                available_models: mistral.available_models.unwrap_or_default(),
+            },
+            ollama: OllamaSettings {
+                api_url: ollama.api_url.unwrap(),
+                available_models: ollama.available_models.unwrap_or_default(),
+            },
+            open_router: OpenRouterSettings {
+                api_url: open_router.api_url.unwrap(),
+                available_models: open_router.available_models.unwrap_or_default(),
+            },
+            openai: OpenAiSettings {
+                api_url: openai.api_url.unwrap(),
+                available_models: openai.available_models.unwrap_or_default(),
+            },
+            openai_compatible: openai_compatible
+                .into_iter()
+                .map(|(key, value)| {
+                    (
+                        key,
+                        OpenAiCompatibleSettings {
+                            api_url: value.api_url,
+                            available_models: value.available_models,
+                        },
+                    )
+                })
+                .collect(),
+            vercel: VercelSettings {
+                api_url: vercel.api_url.unwrap(),
+                available_models: vercel.available_models.unwrap_or_default(),
+            },
+            x_ai: XAiSettings {
+                api_url: x_ai.api_url.unwrap(),
+                available_models: x_ai.available_models.unwrap_or_default(),
+            },
+            zed_dot_dev: ZedDotDevSettings {
+                available_models: zed_dot_dev.available_models.unwrap_or_default(),
+            },
         }
+    }
 
-        let mut settings = AllLanguageModelSettings::default();
-
-        for value in sources.defaults_and_customizations() {
-            // Anthropic
-            let anthropic = value.anthropic.clone();
-            merge(
-                &mut settings.anthropic.api_url,
-                anthropic.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.anthropic.available_models,
-                anthropic.as_ref().and_then(|s| s.available_models.clone()),
-            );
-
-            // Bedrock
-            let bedrock = value.bedrock.clone();
-            merge(
-                &mut settings.bedrock.profile_name,
-                bedrock.as_ref().map(|s| s.profile.clone()),
-            );
-            merge(
-                &mut settings.bedrock.authentication_method,
-                bedrock.as_ref().map(|s| s.authentication_method.clone()),
-            );
-            merge(
-                &mut settings.bedrock.region,
-                bedrock.as_ref().map(|s| s.region.clone()),
-            );
-            merge(
-                &mut settings.bedrock.endpoint,
-                bedrock.as_ref().map(|s| s.endpoint_url.clone()),
-            );
-
-            // Ollama
-            let ollama = value.ollama.clone();
-
-            merge(
-                &mut settings.ollama.api_url,
-                value.ollama.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.ollama.available_models,
-                ollama.as_ref().and_then(|s| s.available_models.clone()),
-            );
-
-            // LM Studio
-            let lmstudio = value.lmstudio.clone();
-
-            merge(
-                &mut settings.lmstudio.api_url,
-                value.lmstudio.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.lmstudio.available_models,
-                lmstudio.as_ref().and_then(|s| s.available_models.clone()),
-            );
+    fn refine(&mut self, content: &settings::SettingsContent, _cx: &mut App) {
+        let Some(models) = content.language_models.as_ref() else {
+            return;
+        };
 
-            // DeepSeek
-            let deepseek = value.deepseek.clone();
+        if let Some(anthropic) = models.anthropic.as_ref() {
+            self.anthropic
+                .available_models
+                .merge_from(&anthropic.available_models);
+            self.anthropic.api_url.merge_from(&anthropic.api_url);
+        }
 
-            merge(
-                &mut settings.deepseek.api_url,
-                value.deepseek.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.deepseek.available_models,
-                deepseek.as_ref().and_then(|s| s.available_models.clone()),
-            );
+        if let Some(bedrock) = models.bedrock.clone() {
+            self.bedrock
+                .available_models
+                .merge_from(&bedrock.available_models);
 
-            // OpenAI
-            let openai = value.openai.clone();
-            merge(
-                &mut settings.openai.api_url,
-                openai.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.openai.available_models,
-                openai.as_ref().and_then(|s| s.available_models.clone()),
-            );
+            if let Some(endpoint_url) = bedrock.endpoint_url {
+                self.bedrock.endpoint = Some(endpoint_url)
+            }
 
-            // OpenAI Compatible
-            if let Some(openai_compatible) = value.openai_compatible.clone() {
-                for (id, openai_compatible_settings) in openai_compatible {
-                    settings.openai_compatible.insert(
-                        id,
-                        OpenAiCompatibleSettings {
-                            api_url: openai_compatible_settings.api_url,
-                            available_models: openai_compatible_settings.available_models,
-                        },
-                    );
-                }
+            if let Some(region) = bedrock.region {
+                self.bedrock.region = Some(region)
             }
 
-            // Vercel
-            let vercel = value.vercel.clone();
-            merge(
-                &mut settings.vercel.api_url,
-                vercel.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.vercel.available_models,
-                vercel.as_ref().and_then(|s| s.available_models.clone()),
-            );
+            if let Some(profile_name) = bedrock.profile {
+                self.bedrock.profile_name = Some(profile_name);
+            }
 
-            // XAI
-            let x_ai = value.x_ai.clone();
-            merge(
-                &mut settings.x_ai.api_url,
-                x_ai.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.x_ai.available_models,
-                x_ai.as_ref().and_then(|s| s.available_models.clone()),
-            );
+            if let Some(auth_method) = bedrock.authentication_method {
+                self.bedrock.authentication_method = Some(auth_method.into());
+            }
+        }
 
-            // ZedDotDev
-            merge(
-                &mut settings.zed_dot_dev.available_models,
-                value
-                    .zed_dot_dev
-                    .as_ref()
-                    .and_then(|s| s.available_models.clone()),
-            );
-            merge(
-                &mut settings.google.api_url,
-                value.google.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.google.available_models,
-                value
-                    .google
-                    .as_ref()
-                    .and_then(|s| s.available_models.clone()),
-            );
+        if let Some(deepseek) = models.deepseek.as_ref() {
+            self.deepseek
+                .available_models
+                .merge_from(&deepseek.available_models);
+            self.deepseek.api_url.merge_from(&deepseek.api_url);
+        }
 
-            // Mistral
-            let mistral = value.mistral.clone();
-            merge(
-                &mut settings.mistral.api_url,
-                mistral.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.mistral.available_models,
-                mistral.as_ref().and_then(|s| s.available_models.clone()),
-            );
+        if let Some(google) = models.google.as_ref() {
+            self.google
+                .available_models
+                .merge_from(&google.available_models);
+            self.google.api_url.merge_from(&google.api_url);
+        }
 
-            // OpenRouter
-            let open_router = value.open_router.clone();
-            merge(
-                &mut settings.open_router.api_url,
-                open_router.as_ref().and_then(|s| s.api_url.clone()),
-            );
-            merge(
-                &mut settings.open_router.available_models,
-                open_router
-                    .as_ref()
-                    .and_then(|s| s.available_models.clone()),
-            );
+        if let Some(lmstudio) = models.lmstudio.as_ref() {
+            self.lmstudio
+                .available_models
+                .merge_from(&lmstudio.available_models);
+            self.lmstudio.api_url.merge_from(&lmstudio.api_url);
         }
 
-        Ok(settings)
+        if let Some(mistral) = models.mistral.as_ref() {
+            self.mistral
+                .available_models
+                .merge_from(&mistral.available_models);
+            self.mistral.api_url.merge_from(&mistral.api_url);
+        }
+        if let Some(ollama) = models.ollama.as_ref() {
+            self.ollama
+                .available_models
+                .merge_from(&ollama.available_models);
+            self.ollama.api_url.merge_from(&ollama.api_url);
+        }
+        if let Some(open_router) = models.open_router.as_ref() {
+            self.open_router
+                .available_models
+                .merge_from(&open_router.available_models);
+            self.open_router.api_url.merge_from(&open_router.api_url);
+        }
+        if let Some(openai) = models.openai.as_ref() {
+            self.openai
+                .available_models
+                .merge_from(&openai.available_models);
+            self.openai.api_url.merge_from(&openai.api_url);
+        }
+        if let Some(openai_compatible) = models.openai_compatible.clone() {
+            for (name, value) in openai_compatible {
+                self.openai_compatible.insert(
+                    name,
+                    OpenAiCompatibleSettings {
+                        api_url: value.api_url,
+                        available_models: value.available_models,
+                    },
+                );
+            }
+        }
+        if let Some(vercel) = models.vercel.as_ref() {
+            self.vercel
+                .available_models
+                .merge_from(&vercel.available_models);
+            self.vercel.api_url.merge_from(&vercel.api_url);
+        }
+        if let Some(x_ai) = models.x_ai.as_ref() {
+            self.x_ai
+                .available_models
+                .merge_from(&x_ai.available_models);
+            self.x_ai.api_url.merge_from(&x_ai.api_url);
+        }
+        if let Some(zed_dot_dev) = models.zed_dot_dev.as_ref() {
+            self.zed_dot_dev
+                .available_models
+                .merge_from(&zed_dot_dev.available_models);
+        }
     }
-
-    fn import_from_vscode(_vscode: &settings::VsCodeSettings, _current: &mut Self::FileContent) {}
 }
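
For context on the new `Settings` implementation: `from_defaults` builds the concrete settings from the default settings content (hence the `unwrap()`s, which assume every provider section is present in the defaults), while `refine` layers user and project overrides on top via `MergeFrom`, so fields omitted from a settings file deserialize to `None` and leave the previous value untouched. A minimal round-trip sketch of the relocated content type (crate path assumed from the re-exports above; the function name and JSON values are illustrative only):

    use settings::AllLanguageModelSettingsContent;

    fn parse_language_models(json: &str) -> serde_json::Result<AllLanguageModelSettingsContent> {
        // Omitted providers and fields deserialize to None, which is what lets
        // `refine` skip them instead of clobbering earlier values.
        serde_json::from_str(json)
    }

    // e.g. parse_language_models(r#"{ "anthropic": { "available_models":
    //     [{ "name": "claude-3-5-sonnet-latest", "max_tokens": 200000 }] } }"#)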

crates/ollama/Cargo.toml 🔗

@@ -22,4 +22,5 @@ http_client.workspace = true
 schemars = { workspace = true, optional = true }
 serde.workspace = true
 serde_json.workspace = true
+settings.workspace = true
 workspace-hack.workspace = true

crates/ollama/src/ollama.rs 🔗

@@ -3,33 +3,11 @@ use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::B
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, http};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
+pub use settings::KeepAlive;
 use std::time::Duration;
 
 pub const OLLAMA_API_URL: &str = "http://localhost:11434";
 
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
-#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
-#[serde(untagged)]
-pub enum KeepAlive {
-    /// Keep model alive for N seconds
-    Seconds(isize),
-    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
-    Duration(String),
-}
-
-impl KeepAlive {
-    /// Keep model alive until a new model is loaded or until Ollama shuts down
-    fn indefinite() -> Self {
-        Self::Seconds(-1)
-    }
-}
-
-impl Default for KeepAlive {
-    fn default() -> Self {
-        Self::indefinite()
-    }
-}
-
 #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 pub struct Model {

crates/open_ai/Cargo.toml 🔗

@@ -23,5 +23,6 @@ schemars = { workspace = true, optional = true }
 log.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+settings.workspace = true
 strum.workspace = true
 workspace-hack.workspace = true

crates/open_ai/src/open_ai.rs 🔗

@@ -3,6 +3,7 @@ use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::B
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
+pub use settings::OpenAiReasoningEffort as ReasoningEffort;
 use std::{convert::TryFrom, future::Future};
 use strum::EnumIter;
 
@@ -278,16 +279,6 @@ pub enum ToolChoice {
     Other(ToolDefinition),
 }
 
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
-#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
-#[serde(rename_all = "lowercase")]
-pub enum ReasoningEffort {
-    Minimal,
-    Low,
-    Medium,
-    High,
-}
-
 #[derive(Clone, Deserialize, Serialize, Debug)]
 #[serde(tag = "type", rename_all = "snake_case")]
 pub enum ToolDefinition {

crates/open_router/Cargo.toml 🔗

@@ -22,7 +22,8 @@ http_client.workspace = true
 schemars = { workspace = true, optional = true }
 serde.workspace = true
 serde_json.workspace = true
-thiserror.workspace = true
+settings.workspace = true
 strum.workspace = true
+thiserror.workspace = true
 util.workspace = true
 workspace-hack.workspace = true

crates/open_router/src/open_router.rs 🔗

@@ -3,10 +3,13 @@ use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::B
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, http};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
+pub use settings::DataCollection;
+pub use settings::ModelMode;
+pub use settings::OpenRouterAvailableModel as AvailableModel;
+pub use settings::OpenRouterProvider as Provider;
 use std::{convert::TryFrom, io, time::Duration};
 use strum::EnumString;
 use thiserror::Error;
-use util::serde::default_true;
 
 pub const OPEN_ROUTER_API_URL: &str = "https://openrouter.ai/api/v1";
 
@@ -65,41 +68,6 @@ impl From<Role> for String {
     }
 }
 
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-pub enum DataCollection {
-    Allow,
-    Disallow,
-}
-
-impl Default for DataCollection {
-    fn default() -> Self {
-        Self::Allow
-    }
-}
-
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-pub struct Provider {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    order: Option<Vec<String>>,
-    #[serde(default = "default_true")]
-    allow_fallbacks: bool,
-    #[serde(default)]
-    require_parameters: bool,
-    #[serde(default)]
-    data_collection: DataCollection,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    only: Option<Vec<String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    ignore: Option<Vec<String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    quantizations: Option<Vec<String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    sort: Option<String>,
-}
-
 #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 pub struct Model {
@@ -113,16 +81,6 @@ pub struct Model {
     pub provider: Option<Provider>,
 }
 
-#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
-#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
-pub enum ModelMode {
-    #[default]
-    Default,
-    Thinking {
-        budget_tokens: Option<u32>,
-    },
-}
-
 impl Model {
     pub fn default_fast() -> Self {
         Self::new(

crates/settings/src/settings_content.rs 🔗

@@ -1,13 +1,16 @@
 mod agent;
 mod editor;
 mod language;
+mod language_model;
 mod project;
 mod terminal;
 mod theme;
 mod workspace;
+
 pub use agent::*;
 pub use editor::*;
 pub use language::*;
+pub use language_model::*;
 pub use project::*;
 pub use terminal::*;
 pub use theme::*;
@@ -97,6 +100,8 @@ pub struct SettingsContent {
 
     pub line_indicator_format: Option<LineIndicatorFormat>,
 
+    pub language_models: Option<AllLanguageModelSettingsContent>,
+
     pub outline_panel: Option<OutlinePanelSettingsContent>,
 
     /// Configuration for the Message Editor

crates/settings/src/settings_content/language_model.rs 🔗

@@ -0,0 +1,393 @@
+use collections::HashMap;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+
+use std::sync::Arc;
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct AllLanguageModelSettingsContent {
+    pub anthropic: Option<AnthropicSettingsContent>,
+    pub bedrock: Option<AmazonBedrockSettingsContent>,
+    pub deepseek: Option<DeepseekSettingsContent>,
+    pub google: Option<GoogleSettingsContent>,
+    pub lmstudio: Option<LmStudioSettingsContent>,
+    pub mistral: Option<MistralSettingsContent>,
+    pub ollama: Option<OllamaSettingsContent>,
+    pub open_router: Option<OpenRouterSettingsContent>,
+    pub openai: Option<OpenAiSettingsContent>,
+    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
+    pub vercel: Option<VercelSettingsContent>,
+    pub x_ai: Option<XAiSettingsContent>,
+    #[serde(rename = "zed.dev")]
+    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct AnthropicSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<AnthropicAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct AnthropicAvailableModel {
+    /// The model's name in the Anthropic API. e.g. claude-3-5-sonnet-latest, claude-3-opus-20240229, etc
+    pub name: String,
+    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
+    pub display_name: Option<String>,
+    /// The model's context window size.
+    pub max_tokens: u64,
+    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
+    pub tool_override: Option<String>,
+    /// Configuration of Anthropic's caching API.
+    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
+    pub max_output_tokens: Option<u64>,
+    pub default_temperature: Option<f32>,
+    #[serde(default)]
+    pub extra_beta_headers: Vec<String>,
+    /// The model's mode (e.g. thinking)
+    pub mode: Option<ModelMode>,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct AmazonBedrockSettingsContent {
+    pub available_models: Option<Vec<BedrockAvailableModel>>,
+    pub endpoint_url: Option<String>,
+    pub region: Option<String>,
+    pub profile: Option<String>,
+    pub authentication_method: Option<BedrockAuthMethodContent>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct BedrockAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
+    pub max_output_tokens: Option<u64>,
+    pub default_temperature: Option<f32>,
+    pub mode: Option<ModelMode>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub enum BedrockAuthMethodContent {
+    #[serde(rename = "named_profile")]
+    NamedProfile,
+    #[serde(rename = "sso")]
+    SingleSignOn,
+    /// IMDSv2, PodIdentity, env vars, etc.
+    #[serde(rename = "default")]
+    Automatic,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct OllamaSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<OllamaAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct OllamaAvailableModel {
+    /// The model name in the Ollama API (e.g. "llama3.2:latest")
+    pub name: String,
+    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
+    pub display_name: Option<String>,
+    /// The Context Length parameter to the model (aka num_ctx or n_ctx)
+    pub max_tokens: u64,
+    /// The number of seconds to keep the connection open after the last request
+    pub keep_alive: Option<KeepAlive>,
+    /// Whether the model supports tools
+    pub supports_tools: Option<bool>,
+    /// Whether the model supports vision
+    pub supports_images: Option<bool>,
+    /// Whether to enable think mode
+    pub supports_thinking: Option<bool>,
+}
+
+#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, JsonSchema)]
+#[serde(untagged)]
+pub enum KeepAlive {
+    /// Keep model alive for N seconds
+    Seconds(isize),
+    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
+    Duration(String),
+}
+
+impl KeepAlive {
+    /// Keep model alive until a new model is loaded or until Ollama shuts down
+    pub fn indefinite() -> Self {
+        Self::Seconds(-1)
+    }
+}
+
+impl Default for KeepAlive {
+    fn default() -> Self {
+        Self::indefinite()
+    }
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct LmStudioSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<LmStudioAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct LmStudioAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub supports_tool_calls: bool,
+    pub supports_images: bool,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct DeepseekSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<DeepseekAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct DeepseekAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct MistralSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<MistralAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct MistralAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
+    pub supports_tools: Option<bool>,
+    pub supports_images: Option<bool>,
+    pub supports_thinking: Option<bool>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct OpenAiSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<OpenAiAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct OpenAiAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
+    pub reasoning_effort: Option<OpenAiReasoningEffort>,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)]
+#[serde(rename_all = "lowercase")]
+pub enum OpenAiReasoningEffort {
+    Minimal,
+    Low,
+    Medium,
+    High,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct OpenAiCompatibleSettingsContent {
+    pub api_url: String,
+    pub available_models: Vec<OpenAiCompatibleAvailableModel>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct OpenAiCompatibleAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
+    #[serde(default)]
+    pub capabilities: OpenAiCompatibleModelCapabilities,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct OpenAiCompatibleModelCapabilities {
+    pub tools: bool,
+    pub images: bool,
+    pub parallel_tool_calls: bool,
+    pub prompt_cache_key: bool,
+}
+
+impl Default for OpenAiCompatibleModelCapabilities {
+    fn default() -> Self {
+        Self {
+            tools: true,
+            images: false,
+            parallel_tool_calls: false,
+            prompt_cache_key: false,
+        }
+    }
+}
+
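+// Illustrative sketch, not part of the original change: `#[serde(default)]` on the
+// `capabilities` field means a model entry that omits it falls back to the defaults above
+// (tool calls on, everything else off). The model name is a placeholder; assumes
+// `serde_json` is available as a dev-dependency.
+#[cfg(test)]
+mod open_ai_compatible_capabilities_example {
+    use super::{OpenAiCompatibleAvailableModel, OpenAiCompatibleModelCapabilities};
+
+    #[test]
+    fn omitted_capabilities_fall_back_to_defaults() {
+        let model: OpenAiCompatibleAvailableModel =
+            serde_json::from_str(r#"{"name": "some-model", "max_tokens": 128000}"#).unwrap();
+        assert_eq!(model.capabilities, OpenAiCompatibleModelCapabilities::default());
+        assert!(model.capabilities.tools);
+        assert!(!model.capabilities.images);
+    }
+}
+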
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct VercelSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<VercelAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct VercelAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct GoogleSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<GoogleAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct GoogleAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub mode: Option<ModelMode>,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct XAiSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<XaiAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct XaiAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct ZedDotDevSettingsContent {
+    pub available_models: Option<Vec<ZedDotDevAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct ZedDotDevAvailableModel {
+    /// The provider of the language model.
+    pub provider: ZedDotDevAvailableProvider,
+    /// The model's name in the provider's API, e.g. claude-3-5-sonnet-20240620.
+    pub name: String,
+    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
+    pub display_name: Option<String>,
+    /// The size of the context window, indicating the maximum number of tokens the model can process.
+    pub max_tokens: usize,
+    /// The maximum number of output tokens allowed by the model.
+    pub max_output_tokens: Option<u64>,
+    /// The maximum number of completion tokens allowed by the model (o1-* only)
+    pub max_completion_tokens: Option<u64>,
+    /// Override this model with a different Anthropic model for tool calls.
+    pub tool_override: Option<String>,
+    /// Indicates whether this custom model supports caching.
+    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
+    /// The default temperature to use for this model.
+    pub default_temperature: Option<f32>,
+    /// Any extra beta headers to provide when using the model.
+    #[serde(default)]
+    pub extra_beta_headers: Vec<String>,
+    /// The model's mode (e.g. thinking)
+    pub mode: Option<ModelMode>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+#[serde(rename_all = "lowercase")]
+pub enum ZedDotDevAvailableProvider {
+    Anthropic,
+    OpenAi,
+    Google,
+}
+
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
+pub struct OpenRouterSettingsContent {
+    pub api_url: Option<String>,
+    pub available_models: Option<Vec<OpenRouterAvailableModel>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct OpenRouterAvailableModel {
+    pub name: String,
+    pub display_name: Option<String>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
+    pub supports_tools: Option<bool>,
+    pub supports_images: Option<bool>,
+    pub mode: Option<ModelMode>,
+    pub provider: Option<OpenRouterProvider>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct OpenRouterProvider {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    order: Option<Vec<String>>,
+    #[serde(default = "default_true")]
+    allow_fallbacks: bool,
+    #[serde(default)]
+    require_parameters: bool,
+    #[serde(default)]
+    data_collection: DataCollection,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    only: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    ignore: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    quantizations: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    sort: Option<String>,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+#[serde(rename_all = "lowercase")]
+pub enum DataCollection {
+    Allow,
+    Disallow,
+}
+
+impl Default for DataCollection {
+    fn default() -> Self {
+        Self::Allow
+    }
+}
+
+fn default_true() -> bool {
+    true
+}
+
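+// Illustrative sketch, not part of the original change: a provider block that only
+// specifies `order` picks up the defaults wired above — fallbacks allowed, parameters not
+// required, and data collection permitted. The provider slug is a placeholder; assumes
+// `serde_json` is available as a dev-dependency.
+#[cfg(test)]
+mod open_router_provider_example {
+    use super::{DataCollection, OpenRouterProvider};
+
+    #[test]
+    fn minimal_provider_uses_defaults() {
+        let provider: OpenRouterProvider =
+            serde_json::from_str(r#"{"order": ["anthropic"]}"#).unwrap();
+        assert_eq!(provider.order, Some(vec!["anthropic".to_string()]));
+        assert!(provider.allow_fallbacks);
+        assert!(!provider.require_parameters);
+        assert_eq!(provider.data_collection, DataCollection::Allow);
+    }
+}
+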
+/// Configuration for caching language model messages.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
+pub struct LanguageModelCacheConfiguration {
+    pub max_cache_anchors: usize,
+    pub should_speculate: bool,
+    pub min_total_token: u64,
+}
+
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
+#[serde(tag = "type", rename_all = "lowercase")]
+pub enum ModelMode {
+    #[default]
+    Default,
+    Thinking {
+        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
+        budget_tokens: Option<u32>,
+    },
+}
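+
+// Illustrative sketch, not part of the original change: `ModelMode` is internally tagged
+// on "type", so the settings JSON forms are `{"type": "default"}` and
+// `{"type": "thinking", "budget_tokens": 4096}`. Assumes `serde_json` is available as a
+// dev-dependency.
+#[cfg(test)]
+mod model_mode_example {
+    use super::ModelMode;
+
+    #[test]
+    fn deserializes_tagged_variants() {
+        let thinking: ModelMode =
+            serde_json::from_str(r#"{"type": "thinking", "budget_tokens": 4096}"#).unwrap();
+        assert_eq!(
+            thinking,
+            ModelMode::Thinking {
+                budget_tokens: Some(4096)
+            }
+        );
+
+        let default: ModelMode = serde_json::from_str(r#"{"type": "default"}"#).unwrap();
+        assert_eq!(default, ModelMode::Default);
+    }
+}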