use collections::HashMap;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_with::skip_serializing_none;
use settings_macros::MergeFrom;

use std::sync::Arc;

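/// Settings for every built-in language model provider, keyed by provider.
///
/// A minimal sketch of the JSON this deserializes from (the URL and the empty
/// model lists are illustrative, not defaults):
///
/// ```json
/// {
///     "openai": {
///         "api_url": "https://api.openai.com/v1",
///         "available_models": []
///     },
///     "zed.dev": {
///         "available_models": []
///     }
/// }
/// ```
///
/// Note that the `zed_dot_dev` field is renamed to `"zed.dev"` on the wire.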
#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AllLanguageModelSettingsContent {
    pub anthropic: Option<AnthropicSettingsContent>,
    pub bedrock: Option<AmazonBedrockSettingsContent>,
    pub deepseek: Option<DeepseekSettingsContent>,
    pub google: Option<GoogleSettingsContent>,
    pub lmstudio: Option<LmStudioSettingsContent>,
    pub mistral: Option<MistralSettingsContent>,
    pub ollama: Option<OllamaSettingsContent>,
    pub open_router: Option<OpenRouterSettingsContent>,
    pub openai: Option<OpenAiSettingsContent>,
    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
    pub vercel: Option<VercelSettingsContent>,
    pub x_ai: Option<XAiSettingsContent>,
    #[serde(rename = "zed.dev")]
    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
}

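/// A hypothetical configuration accepted by this struct (model name, URL, and
/// numbers are illustrative, not defaults):
///
/// ```json
/// {
///     "api_url": "https://api.anthropic.com",
///     "available_models": [
///         {
///             "name": "claude-3-5-sonnet-latest",
///             "max_tokens": 200000,
///             "mode": { "type": "thinking", "budget_tokens": 4096 }
///         }
///     ]
/// }
/// ```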
#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AnthropicSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<AnthropicAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct AnthropicAvailableModel {
    /// The model's name in the Anthropic API, e.g. `claude-3-5-sonnet-latest` or `claude-3-opus-20240229`.
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
    pub tool_override: Option<String>,
    /// Configuration of Anthropic's caching API.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AmazonBedrockSettingsContent {
    pub available_models: Option<Vec<BedrockAvailableModel>>,
    pub endpoint_url: Option<String>,
    pub region: Option<String>,
    pub profile: Option<String>,
    pub authentication_method: Option<BedrockAuthMethodContent>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct BedrockAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    pub max_output_tokens: Option<u64>,
    pub default_temperature: Option<f32>,
    pub mode: Option<ModelMode>,
}

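/// On the wire each variant serializes as the string given by its
/// `#[serde(rename = ...)]` attribute. A sketch of how this appears inside a
/// Bedrock settings block (one of the three forms):
///
/// ```json
/// { "authentication_method": "named_profile" }
/// { "authentication_method": "sso" }
/// { "authentication_method": "default" }
/// ```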
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub enum BedrockAuthMethodContent {
    #[serde(rename = "named_profile")]
    NamedProfile,
    #[serde(rename = "sso")]
    SingleSignOn,
    /// IMDSv2, PodIdentity, env vars, etc.
    #[serde(rename = "default")]
    Automatic,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OllamaSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OllamaAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OllamaAvailableModel {
    /// The model's name in the Ollama API (e.g. "llama3.2:latest").
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The context length parameter passed to the model (aka `num_ctx` or `n_ctx`).
    pub max_tokens: u64,
    /// How long to keep the model loaded in memory after the last request.
    pub keep_alive: Option<KeepAlive>,
    /// Whether the model supports tools.
    pub supports_tools: Option<bool>,
    /// Whether the model supports vision.
    pub supports_images: Option<bool>,
    /// Whether the model supports thinking mode.
    pub supports_thinking: Option<bool>,
}

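/// Because this enum is `#[serde(untagged)]`, it deserializes from either a
/// bare number of seconds or a duration string. A sketch of both accepted
/// forms for the `keep_alive` field (values illustrative):
///
/// ```json
/// "keep_alive": -1
/// "keep_alive": "10m"
/// ```
///
/// A negative number keeps the model loaded indefinitely; see
/// [`KeepAlive::indefinite`].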
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, JsonSchema, MergeFrom)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds.
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}

impl KeepAlive {
    /// Keep model alive until a new model is loaded or until Ollama shuts down.
    pub const fn indefinite() -> Self {
        Self::Seconds(-1)
    }
}

impl Default for KeepAlive {
    fn default() -> Self {
        Self::indefinite()
    }
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct LmStudioSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<LmStudioAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LmStudioAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub supports_tool_calls: bool,
    pub supports_images: bool,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct DeepseekSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<DeepseekAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct DeepseekAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct MistralSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<MistralAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct MistralAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_tools: Option<bool>,
    pub supports_images: Option<bool>,
    pub supports_thinking: Option<bool>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OpenAiAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub reasoning_effort: Option<OpenAiReasoningEffort>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum OpenAiReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}

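/// Settings for a single OpenAI-compatible provider. In
/// [`AllLanguageModelSettingsContent::openai_compatible`] these are keyed by a
/// user-chosen provider name. A sketch of the enclosing map (the provider
/// name, URL, and model are hypothetical):
///
/// ```json
/// "openai_compatible": {
///     "my-provider": {
///         "api_url": "https://api.example.com/v1",
///         "available_models": [
///             { "name": "example-model", "max_tokens": 32768 }
///         ]
///     }
/// }
/// ```
///
/// Omitting `capabilities` on a model falls back to
/// [`OpenAiCompatibleModelCapabilities::default`].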
#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleSettingsContent {
    pub api_url: String,
    pub available_models: Vec<OpenAiCompatibleAvailableModel>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    #[serde(default)]
    pub capabilities: OpenAiCompatibleModelCapabilities,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleModelCapabilities {
    pub tools: bool,
    pub images: bool,
    pub parallel_tool_calls: bool,
    pub prompt_cache_key: bool,
}

impl Default for OpenAiCompatibleModelCapabilities {
    fn default() -> Self {
        Self {
            tools: true,
            images: false,
            parallel_tool_calls: false,
            prompt_cache_key: false,
        }
    }
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct VercelSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<VercelAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct VercelAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct GoogleSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<GoogleAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct GoogleAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub mode: Option<ModelMode>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct XAiSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<XaiAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct XaiAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_images: Option<bool>,
    pub supports_tools: Option<bool>,
    pub parallel_tool_calls: Option<bool>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct ZedDotDevSettingsContent {
    pub available_models: Option<Vec<ZedDotDevAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct ZedDotDevAvailableModel {
    /// The provider of the language model.
    pub provider: ZedDotDevAvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (o1-* only).
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum ZedDotDevAvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenRouterSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OpenRouterAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_tools: Option<bool>,
    pub supports_images: Option<bool>,
    pub mode: Option<ModelMode>,
    pub provider: Option<OpenRouterProvider>,
}

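/// Provider routing preferences passed through to OpenRouter. A sketch of the
/// JSON shape of a model's `provider` field (the slugs and quantization are
/// illustrative):
///
/// ```json
/// "provider": {
///     "order": ["some-provider", "another-provider"],
///     "allow_fallbacks": false,
///     "quantizations": ["fp16"]
/// }
/// ```
///
/// `allow_fallbacks` defaults to `true`; `require_parameters` and
/// `data_collection` default to `false` and `"allow"` respectively.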
#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterProvider {
    order: Option<Vec<String>>,
    #[serde(default = "default_true")]
    allow_fallbacks: bool,
    #[serde(default)]
    require_parameters: bool,
    #[serde(default)]
    data_collection: DataCollection,
    only: Option<Vec<String>>,
    ignore: Option<Vec<String>>,
    quantizations: Option<Vec<String>>,
    sort: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum DataCollection {
    Allow,
    Disallow,
}

impl Default for DataCollection {
    fn default() -> Self {
        Self::Allow
    }
}

const fn default_true() -> bool {
    true
}

/// Configuration for caching language model messages.
#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: u64,
}

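/// `ModelMode` is internally tagged on `"type"` with lowercase variant names,
/// so the two variants look like this in JSON (the budget value is
/// illustrative):
///
/// ```json
/// { "type": "default" }
/// { "type": "thinking", "budget_tokens": 4096 }
/// ```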
#[derive(
    Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom,
)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}