use collections::HashMap;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_with::skip_serializing_none;
use settings_macros::MergeFrom;

use std::sync::Arc;

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AllLanguageModelSettingsContent {
    pub anthropic: Option<AnthropicSettingsContent>,
    pub bedrock: Option<AmazonBedrockSettingsContent>,
    pub deepseek: Option<DeepseekSettingsContent>,
    pub google: Option<GoogleSettingsContent>,
    pub lmstudio: Option<LmStudioSettingsContent>,
    pub mistral: Option<MistralSettingsContent>,
    pub ollama: Option<OllamaSettingsContent>,
    pub open_router: Option<OpenRouterSettingsContent>,
    pub openai: Option<OpenAiSettingsContent>,
    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
    pub vercel: Option<VercelSettingsContent>,
    pub x_ai: Option<XAiSettingsContent>,
    #[serde(rename = "zed.dev")]
    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
}
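
// A minimal test sketch (not part of the original source; assumes `serde_json`
// is available as a dev-dependency) verifying that the `zed_dot_dev` field is
// read from the "zed.dev" settings key via the serde rename above:
#[cfg(test)]
mod all_settings_tests {
    use super::*;

    #[test]
    fn zed_dot_dev_reads_renamed_key() {
        let settings: AllLanguageModelSettingsContent =
            serde_json::from_str(r#"{ "zed.dev": {} }"#).unwrap();
        assert!(settings.zed_dot_dev.is_some());
    }
}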

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AnthropicSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<AnthropicAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct AnthropicAvailableModel {
    /// The model's name in the Anthropic API, e.g. claude-3-5-sonnet-latest, claude-3-opus-20240229, etc.
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
    pub tool_override: Option<String>,
    /// Configuration of Anthropic's caching API.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The default temperature to use for this model.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
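
// An illustrative `available_models` entry for the settings above (the values
// here are hypothetical examples, not defaults):
//
// {
//   "name": "claude-3-5-sonnet-latest",
//   "display_name": "Claude 3.5 Sonnet",
//   "max_tokens": 200000,
//   "default_temperature": 1.0,
//   "mode": { "type": "thinking", "budget_tokens": 4096 }
// }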

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AmazonBedrockSettingsContent {
    pub available_models: Option<Vec<BedrockAvailableModel>>,
    pub endpoint_url: Option<String>,
    pub region: Option<String>,
    pub profile: Option<String>,
    pub authentication_method: Option<BedrockAuthMethodContent>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct BedrockAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    pub max_output_tokens: Option<u64>,
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    pub mode: Option<ModelMode>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub enum BedrockAuthMethodContent {
    #[serde(rename = "named_profile")]
    NamedProfile,
    #[serde(rename = "sso")]
    SingleSignOn,
    /// IMDSv2, PodIdentity, env vars, etc.
    #[serde(rename = "default")]
    Automatic,
}
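
// Given the renames above, `authentication_method` accepts the JSON strings
// "named_profile", "sso", and "default".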

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OllamaSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OllamaAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OllamaAvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The context length parameter passed to the model (aka num_ctx or n_ctx)
    pub max_tokens: u64,
    /// How long to keep the model loaded in memory after the last request
    pub keep_alive: Option<KeepAlive>,
    /// Whether the model supports tools
    pub supports_tools: Option<bool>,
    /// Whether the model supports vision
    pub supports_images: Option<bool>,
    /// Whether the model supports thinking mode
    pub supports_thinking: Option<bool>,
}

#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, JsonSchema, MergeFrom)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep the model alive for N seconds; a negative value (e.g. -1) keeps it alive indefinitely
    Seconds(isize),
    /// Keep the model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}

impl KeepAlive {
    /// Keep the model alive until a new model is loaded or until Ollama shuts down
    pub fn indefinite() -> Self {
        Self::Seconds(-1)
    }
}

impl Default for KeepAlive {
    fn default() -> Self {
        Self::indefinite()
    }
}
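
// A minimal test sketch (not part of the original source; assumes `serde_json`
// is available as a dev-dependency) showing how the untagged representation
// accepts either a bare integer or a duration string:
#[cfg(test)]
mod keep_alive_tests {
    use super::*;

    #[test]
    fn deserializes_both_forms() {
        // A bare integer maps to `Seconds`; -1 is the indefinite default.
        let seconds: KeepAlive = serde_json::from_str("300").unwrap();
        assert_eq!(seconds, KeepAlive::Seconds(300));

        // A string maps to `Duration`.
        let duration: KeepAlive = serde_json::from_str(r#""5m""#).unwrap();
        assert_eq!(duration, KeepAlive::Duration("5m".into()));
    }
}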

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct LmStudioSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<LmStudioAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LmStudioAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub supports_tool_calls: bool,
    pub supports_images: bool,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct DeepseekSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<DeepseekAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct DeepseekAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct MistralSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<MistralAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct MistralAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_tools: Option<bool>,
    pub supports_images: Option<bool>,
    pub supports_thinking: Option<bool>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OpenAiAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub reasoning_effort: Option<OpenAiReasoningEffort>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum OpenAiReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}
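
// With `rename_all = "lowercase"`, `reasoning_effort` accepts the JSON strings
// "minimal", "low", "medium", and "high".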

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleSettingsContent {
    pub api_url: String,
    pub available_models: Vec<OpenAiCompatibleAvailableModel>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    #[serde(default)]
    pub capabilities: OpenAiCompatibleModelCapabilities,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleModelCapabilities {
    pub tools: bool,
    pub images: bool,
    pub parallel_tool_calls: bool,
    pub prompt_cache_key: bool,
}

impl Default for OpenAiCompatibleModelCapabilities {
    fn default() -> Self {
        Self {
            tools: true,
            images: false,
            parallel_tool_calls: false,
            prompt_cache_key: false,
        }
    }
}
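
// A minimal test sketch (not part of the original source; assumes `serde_json`
// is available as a dev-dependency, and uses a hypothetical model name) showing
// that an omitted `capabilities` object falls back to the defaults above
// (tools enabled, everything else off):
#[cfg(test)]
mod openai_compatible_tests {
    use super::*;

    #[test]
    fn capabilities_default_when_omitted() {
        let model: OpenAiCompatibleAvailableModel =
            serde_json::from_str(r#"{ "name": "my-model", "max_tokens": 8192 }"#).unwrap();
        assert!(model.capabilities.tools);
        assert!(!model.capabilities.images);
        assert!(!model.capabilities.parallel_tool_calls);
        assert!(!model.capabilities.prompt_cache_key);
    }
}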

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct VercelSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<VercelAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct VercelAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct GoogleSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<GoogleAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct GoogleAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub mode: Option<ModelMode>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct XAiSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<XaiAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct XaiAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_images: Option<bool>,
    pub supports_tools: Option<bool>,
    pub parallel_tool_calls: Option<bool>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct ZedDotDevSettingsContent {
    pub available_models: Option<Vec<ZedDotDevAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct ZedDotDevAvailableModel {
    /// The provider of the language model.
    pub provider: ZedDotDevAvailableProvider,
    /// The model's name in the provider's API, e.g. claude-3-5-sonnet-20240620
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (o1-* only)
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// The caching configuration to use if this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum ZedDotDevAvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}
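
// With `rename_all = "lowercase"`, the `provider` field accepts the JSON
// strings "anthropic", "openai", and "google".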

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenRouterSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OpenRouterAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_tools: Option<bool>,
    pub supports_images: Option<bool>,
    pub mode: Option<ModelMode>,
    pub provider: Option<OpenRouterProvider>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterProvider {
    order: Option<Vec<String>>,
    #[serde(default = "default_true")]
    allow_fallbacks: bool,
    #[serde(default)]
    require_parameters: bool,
    #[serde(default)]
    data_collection: DataCollection,
    only: Option<Vec<String>>,
    ignore: Option<Vec<String>>,
    quantizations: Option<Vec<String>>,
    sort: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum DataCollection {
    Allow,
    Disallow,
}

impl Default for DataCollection {
    fn default() -> Self {
        Self::Allow
    }
}

fn default_true() -> bool {
    true
}
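
// A minimal test sketch (not part of the original source; assumes `serde_json`
// is available as a dev-dependency) showing the defaults applied when an empty
// provider object is given:
#[cfg(test)]
mod open_router_provider_tests {
    use super::*;

    #[test]
    fn empty_provider_uses_defaults() {
        let provider: OpenRouterProvider = serde_json::from_str("{}").unwrap();
        // `allow_fallbacks` defaults to true via `default_true`...
        assert!(provider.allow_fallbacks);
        // ...while `require_parameters` uses bool's default (false)...
        assert!(!provider.require_parameters);
        // ...and `data_collection` falls back to `DataCollection::Allow`.
        assert_eq!(provider.data_collection, DataCollection::Allow);
    }
}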

/// Configuration for caching language model messages.
#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: u64,
}

#[derive(
    Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom,
)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}
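
// A minimal test sketch (not part of the original source; assumes `serde_json`
// is available as a dev-dependency) showing the internally tagged representation
// produced by `#[serde(tag = "type", rename_all = "lowercase")]`:
#[cfg(test)]
mod model_mode_tests {
    use super::*;

    #[test]
    fn round_trips_tagged_representation() {
        let thinking = ModelMode::Thinking {
            budget_tokens: Some(4096),
        };
        let json = serde_json::to_string(&thinking).unwrap();
        assert_eq!(json, r#"{"type":"thinking","budget_tokens":4096}"#);

        let mode: ModelMode = serde_json::from_str(r#"{"type":"default"}"#).unwrap();
        assert_eq!(mode, ModelMode::Default);
    }
}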