1use collections::HashMap;
2use schemars::JsonSchema;
3use serde::{Deserialize, Serialize};
4use settings_macros::{MergeFrom, with_fallible_options};
5use strum::EnumString;
6
7use std::sync::Arc;
8
/// Per-provider configuration for every supported language model provider.
///
/// Each field is optional so a settings file may configure any subset of
/// providers independently.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AllLanguageModelSettingsContent {
    pub anthropic: Option<AnthropicSettingsContent>,
    pub bedrock: Option<AmazonBedrockSettingsContent>,
    pub deepseek: Option<DeepseekSettingsContent>,
    pub google: Option<GoogleSettingsContent>,
    pub lmstudio: Option<LmStudioSettingsContent>,
    pub mistral: Option<MistralSettingsContent>,
    pub ollama: Option<OllamaSettingsContent>,
    pub open_router: Option<OpenRouterSettingsContent>,
    pub openai: Option<OpenAiSettingsContent>,
    /// OpenAI-compatible endpoints, keyed by a user-chosen provider name.
    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
    pub vercel: Option<VercelSettingsContent>,
    pub vercel_ai_gateway: Option<VercelAiGatewaySettingsContent>,
    pub x_ai: Option<XAiSettingsContent>,
    /// Serialized under the key "zed.dev" in settings files.
    #[serde(rename = "zed.dev")]
    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
}
28
/// Configuration for the Anthropic provider.
#[with_fallible_options]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AnthropicSettingsContent {
    /// Override for the Anthropic API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<AnthropicAvailableModel>>,
}
35
/// A user-declared Anthropic model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct AnthropicAvailableModel {
    /// The model's name in the Anthropic API. e.g. claude-3-5-sonnet-latest, claude-3-opus-20240229, etc
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
    pub tool_override: Option<String>,
    /// Configuration of Anthropic's caching API.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// Default sampling temperature; serialized with two decimal places.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
57
/// Configuration for the Amazon Bedrock provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AmazonBedrockSettingsContent {
    /// Models to make available from this provider.
    pub available_models: Option<Vec<BedrockAvailableModel>>,
    /// Override for the Bedrock endpoint URL.
    pub endpoint_url: Option<String>,
    /// AWS region to use.
    pub region: Option<String>,
    /// AWS profile name — presumably a named AWS CLI profile; see
    /// `BedrockAuthMethodContent::NamedProfile`.
    pub profile: Option<String>,
    /// How to authenticate against AWS (profile, SSO, API key, or default chain).
    pub authentication_method: Option<BedrockAuthMethodContent>,
    /// NOTE(review): semantics not visible here — presumably whether global
    /// (cross-region) inference endpoints may be used; confirm against callers.
    pub allow_global: Option<bool>,
    /// Enable the 1M token extended context window beta for supported Anthropic models.
    pub allow_extended_context: Option<bool>,
}
70
/// A user-declared Amazon Bedrock model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct BedrockAvailableModel {
    /// The model's name in the Bedrock API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// Configuration for message caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// Default sampling temperature; serialized with two decimal places.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}
83
/// How to authenticate against Amazon Bedrock.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub enum BedrockAuthMethodContent {
    /// Use a named AWS profile (serialized as "named_profile").
    #[serde(rename = "named_profile")]
    NamedProfile,
    /// Use AWS IAM Identity Center single sign-on (serialized as "sso").
    #[serde(rename = "sso")]
    SingleSignOn,
    /// Use a static API key (serialized as "api_key").
    #[serde(rename = "api_key")]
    ApiKey,
    /// IMDSv2, PodIdentity, env vars, etc.
    #[serde(rename = "default")]
    Automatic,
}
96
/// Configuration for the Ollama provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OllamaSettingsContent {
    /// Override for the Ollama API base URL.
    pub api_url: Option<String>,
    /// Whether to automatically discover models from the Ollama server —
    /// presumably in addition to `available_models`; confirm against callers.
    pub auto_discover: Option<bool>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<OllamaAvailableModel>>,
    /// NOTE(review): looks like a default context window applied to models
    /// that don't declare their own `max_tokens` — confirm against callers.
    pub context_window: Option<u64>,
}
105
/// A user-declared Ollama model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OllamaAvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The Context Length parameter to the model (aka num_ctx or n_ctx)
    pub max_tokens: u64,
    /// The number of seconds to keep the connection open after the last request
    pub keep_alive: Option<KeepAlive>,
    /// Whether the model supports tools
    pub supports_tools: Option<bool>,
    /// Whether the model supports vision
    pub supports_images: Option<bool>,
    /// Whether to enable think mode
    pub supports_thinking: Option<bool>,
}
124
/// Keep-alive value passed through to Ollama.
///
/// Untagged: deserializes from either a bare integer (seconds) or a
/// duration string.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, JsonSchema, MergeFrom)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}
133
134impl KeepAlive {
135 /// Keep model alive until a new model is loaded or until Ollama shuts down
136 pub fn indefinite() -> Self {
137 Self::Seconds(-1)
138 }
139}
140
141impl Default for KeepAlive {
142 fn default() -> Self {
143 Self::indefinite()
144 }
145}
146
/// Configuration for the LM Studio provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct LmStudioSettingsContent {
    /// Override for the LM Studio API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<LmStudioAvailableModel>>,
}
153
/// A user-declared LM Studio model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LmStudioAvailableModel {
    /// The model's name in the LM Studio API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// Whether the model supports tool calls.
    pub supports_tool_calls: bool,
    /// Whether the model supports image input.
    pub supports_images: bool,
}
163
/// Configuration for the DeepSeek provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct DeepseekSettingsContent {
    /// Override for the DeepSeek API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<DeepseekAvailableModel>>,
}
170
/// A user-declared DeepSeek model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct DeepseekAvailableModel {
    /// The model's name in the DeepSeek API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
}
179
/// Configuration for the Mistral provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct MistralSettingsContent {
    /// Override for the Mistral API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<MistralAvailableModel>>,
}
186
/// A user-declared Mistral model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct MistralAvailableModel {
    /// The model's name in the Mistral API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
    /// Whether the model supports tool calling.
    pub supports_tools: Option<bool>,
    /// Whether the model supports image input.
    pub supports_images: Option<bool>,
    /// Whether the model supports thinking mode.
    pub supports_thinking: Option<bool>,
}
199
/// Configuration for the OpenAI provider.
#[with_fallible_options]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiSettingsContent {
    /// Override for the OpenAI API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<OpenAiAvailableModel>>,
}
206
/// A user-declared OpenAI model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiAvailableModel {
    /// The model's name in the OpenAI API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
    /// Reasoning effort to request from reasoning-capable models.
    pub reasoning_effort: Option<OpenAiReasoningEffort>,
    /// Endpoint capabilities; defaults when omitted from settings.
    #[serde(default)]
    pub capabilities: OpenAiModelCapabilities,
}
219
/// Reasoning effort level passed to the OpenAI API.
///
/// Serialized (and parsed via strum) in lowercase: "minimal", "low",
/// "medium", "high", "xhigh".
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, EnumString, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum OpenAiReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
    XHigh,
}
230
/// Configuration for a single OpenAI-compatible endpoint.
///
/// Unlike the built-in providers, both fields are required: a compatible
/// endpoint has no default URL or model list to fall back on.
#[with_fallible_options]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleSettingsContent {
    /// Base URL of the OpenAI-compatible API.
    pub api_url: String,
    /// Models to make available from this endpoint.
    pub available_models: Vec<OpenAiCompatibleAvailableModel>,
}
237
/// Capabilities of an OpenAI model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiModelCapabilities {
    /// Whether the model supports the chat completions endpoint.
    /// Defaults to `true` when omitted from settings.
    #[serde(default = "default_true")]
    pub chat_completions: bool,
}
244
245impl Default for OpenAiModelCapabilities {
246 fn default() -> Self {
247 Self {
248 chat_completions: default_true(),
249 }
250 }
251}
252
/// A user-declared model entry on an OpenAI-compatible endpoint.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleAvailableModel {
    /// The model's name in the endpoint's API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
    /// Endpoint capabilities; defaults when omitted from settings.
    #[serde(default)]
    pub capabilities: OpenAiCompatibleModelCapabilities,
}
264
/// Capabilities of a model served by an OpenAI-compatible endpoint.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleModelCapabilities {
    /// Whether the model supports tool calling.
    pub tools: bool,
    /// Whether the model supports image input.
    pub images: bool,
    /// Whether the model supports parallel tool calls.
    pub parallel_tool_calls: bool,
    /// Whether the endpoint accepts a prompt cache key.
    pub prompt_cache_key: bool,
    /// Whether the model supports the chat completions endpoint.
    /// Defaults to `true` when omitted from settings.
    #[serde(default = "default_true")]
    pub chat_completions: bool,
}
275
276impl Default for OpenAiCompatibleModelCapabilities {
277 fn default() -> Self {
278 Self {
279 tools: true,
280 images: false,
281 parallel_tool_calls: false,
282 prompt_cache_key: false,
283 chat_completions: default_true(),
284 }
285 }
286}
287
/// Configuration for the Vercel provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct VercelSettingsContent {
    /// Override for the Vercel API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<VercelAvailableModel>>,
}
294
/// A user-declared Vercel model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct VercelAvailableModel {
    /// The model's name in the Vercel API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
}
304
/// Configuration for the Vercel AI Gateway provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct VercelAiGatewaySettingsContent {
    /// Override for the Vercel AI Gateway base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<VercelAiGatewayAvailableModel>>,
}
311
/// A user-declared Vercel AI Gateway model entry.
///
/// Reuses `OpenAiCompatibleModelCapabilities` for its capability flags.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct VercelAiGatewayAvailableModel {
    /// The model's name in the gateway's API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
    /// Endpoint capabilities; defaults when omitted from settings.
    #[serde(default)]
    pub capabilities: OpenAiCompatibleModelCapabilities,
}
323
/// Configuration for the Google provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct GoogleSettingsContent {
    /// Override for the Google API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<GoogleAvailableModel>>,
}
330
/// A user-declared Google model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct GoogleAvailableModel {
    /// The model's name in the Google API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}
339
/// Configuration for the xAI provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct XAiSettingsContent {
    /// Override for the xAI API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<XaiAvailableModel>>,
}
346
/// A user-declared xAI model entry.
///
/// NOTE(review): casing is inconsistent with `XAiSettingsContent`
/// ("Xai" vs "XAi"); renaming would break external references, so it is
/// only flagged here.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct XaiAvailableModel {
    /// The model's name in the xAI API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
    /// Whether the model supports image input.
    pub supports_images: Option<bool>,
    /// Whether the model supports tool calling.
    pub supports_tools: Option<bool>,
    /// Whether the model supports parallel tool calls.
    pub parallel_tool_calls: Option<bool>,
}
359
/// Configuration for the zed.dev hosted provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct ZedDotDevSettingsContent {
    /// Models to make available from this provider.
    pub available_models: Option<Vec<ZedDotDevAvailableModel>>,
}
365
/// A user-declared model entry for the zed.dev hosted provider, which
/// proxies one of several upstream providers.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct ZedDotDevAvailableModel {
    /// The provider of the language model.
    pub provider: ZedDotDevAvailableProvider,
    /// The model's name in the provider's API. e.g. claude-3-5-sonnet-20240620
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    // NOTE(review): `usize` here while every other provider uses `u64`;
    // changing the type would alter the public interface, so it is only flagged.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (o1-* only)
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
394
/// Upstream providers a zed.dev model entry may target.
///
/// Serialized in lowercase: "anthropic", "openai", "google".
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum ZedDotDevAvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}
402
/// Configuration for the OpenRouter provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenRouterSettingsContent {
    /// Override for the OpenRouter API base URL.
    pub api_url: Option<String>,
    /// Models to make available from this provider.
    pub available_models: Option<Vec<OpenRouterAvailableModel>>,
}
409
/// A user-declared OpenRouter model entry.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterAvailableModel {
    /// The model's name in the OpenRouter API.
    pub name: String,
    /// The model's name as shown in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens the model may generate.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens.
    pub max_completion_tokens: Option<u64>,
    /// Whether the model supports tool calling.
    pub supports_tools: Option<bool>,
    /// Whether the model supports image input.
    pub supports_images: Option<bool>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
    /// Upstream-provider routing preferences for this model.
    pub provider: Option<OpenRouterProvider>,
}
423
/// OpenRouter provider-routing preferences for a model.
///
/// Fields are private; they are read only through (de)serialization.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterProvider {
    /// Preference order of upstream provider names — presumably tried
    /// first to last; confirm against OpenRouter's routing docs.
    order: Option<Vec<String>>,
    /// Whether to allow falling back to other providers.
    /// Defaults to `true` when omitted from settings.
    #[serde(default = "default_true")]
    allow_fallbacks: bool,
    /// Defaults to `false` when omitted from settings.
    #[serde(default)]
    require_parameters: bool,
    /// Data-collection policy; defaults to `DataCollection::Allow`.
    #[serde(default)]
    data_collection: DataCollection,
    /// Restrict routing to only these provider names.
    only: Option<Vec<String>>,
    /// Provider names to exclude from routing.
    ignore: Option<Vec<String>>,
    /// Acceptable quantization levels — semantics defined by OpenRouter.
    quantizations: Option<Vec<String>>,
    /// Sort criterion for provider selection — semantics defined by OpenRouter.
    sort: Option<String>,
}
439
/// Whether upstream providers may collect data for a routed request.
///
/// Serialized in lowercase ("allow"/"disallow"); defaults to `Allow`.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum DataCollection {
    #[default]
    Allow,
    Disallow,
}
447
/// Serde default helper: makes a `bool` field referenced via
/// `#[serde(default = "default_true")]` default to `true` when omitted.
fn default_true() -> bool {
    true
}
451
/// Configuration for caching language model messages.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LanguageModelCacheConfiguration {
    /// Maximum number of cache anchors to place in a conversation.
    pub max_cache_anchors: usize,
    /// Whether to speculatively cache ahead of explicit anchors —
    /// TODO confirm exact semantics against the consumer of this config.
    pub should_speculate: bool,
    /// Minimum token count before caching applies.
    // NOTE(review): singular `min_total_token` is likely a typo for
    // `min_total_tokens`, but the name is part of the serialized schema.
    pub min_total_token: u64,
}
460
/// Operating mode of a model.
///
/// Internally tagged on "type" in lowercase: `{"type": "default"}` or
/// `{"type": "thinking", "budget_tokens": ...}`.
#[derive(
    Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom,
)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    /// Standard completion mode.
    #[default]
    Default,
    /// Extended-thinking mode.
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}