1use collections::HashMap;
2use schemars::JsonSchema;
3use serde::{Deserialize, Serialize};
4use settings_macros::{MergeFrom, with_fallible_options};
5
6use std::sync::Arc;
7
/// Top-level settings for every supported language model provider.
///
/// Each field is optional; a provider whose entry is absent simply has no
/// user-supplied settings.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AllLanguageModelSettingsContent {
    pub anthropic: Option<AnthropicSettingsContent>,
    pub bedrock: Option<AmazonBedrockSettingsContent>,
    pub deepseek: Option<DeepseekSettingsContent>,
    pub google: Option<GoogleSettingsContent>,
    pub lmstudio: Option<LmStudioSettingsContent>,
    pub mistral: Option<MistralSettingsContent>,
    pub ollama: Option<OllamaSettingsContent>,
    pub open_router: Option<OpenRouterSettingsContent>,
    pub openai: Option<OpenAiSettingsContent>,
    /// OpenAI-compatible providers, keyed by a user-chosen provider name.
    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
    pub vercel: Option<VercelSettingsContent>,
    pub x_ai: Option<XAiSettingsContent>,
    /// Serialized/deserialized under the key `"zed.dev"`.
    #[serde(rename = "zed.dev")]
    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
}
26
27#[with_fallible_options]
28#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
29pub struct AnthropicSettingsContent {
30 pub api_url: Option<String>,
31 pub available_models: Option<Vec<AnthropicAvailableModel>>,
32}
33
/// A user-declared model entry for the Anthropic provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct AnthropicAvailableModel {
    /// The model's name in the Anthropic API. e.g. claude-3-5-sonnet-latest, claude-3-opus-20240229, etc
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
    pub tool_override: Option<String>,
    /// Configuration of Anthropic's caching API.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The default sampling temperature; serialized rounded to two decimal places.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// Extra beta headers to send with requests; empty when omitted from settings.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
55
/// Settings for the Amazon Bedrock language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct AmazonBedrockSettingsContent {
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<BedrockAvailableModel>>,
    /// Override for the Bedrock endpoint URL.
    pub endpoint_url: Option<String>,
    /// AWS region to use.
    pub region: Option<String>,
    /// AWS profile name (used with `BedrockAuthMethodContent::NamedProfile`).
    pub profile: Option<String>,
    /// How to authenticate with AWS.
    pub authentication_method: Option<BedrockAuthMethodContent>,
}
65
/// A user-declared model entry for the Amazon Bedrock provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct BedrockAvailableModel {
    /// The model's name in the Bedrock API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// Prompt-caching configuration for this model.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The default sampling temperature; serialized rounded to two decimal places.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}
78
/// How to authenticate with Amazon Bedrock.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub enum BedrockAuthMethodContent {
    /// A named AWS profile; serialized as `"named_profile"`.
    #[serde(rename = "named_profile")]
    NamedProfile,
    /// Single sign-on; serialized as `"sso"`.
    #[serde(rename = "sso")]
    SingleSignOn,
    /// IMDSv2, PodIdentity, env vars, etc.
    #[serde(rename = "default")]
    Automatic,
}
89
/// Settings for the Ollama language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OllamaSettingsContent {
    /// Override for the Ollama API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<OllamaAvailableModel>>,
}
96
/// A user-declared model entry for the Ollama provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OllamaAvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The Context Length parameter to the model (aka num_ctx or n_ctx)
    pub max_tokens: u64,
    /// The number of seconds to keep the connection open after the last request
    pub keep_alive: Option<KeepAlive>,
    /// Whether the model supports tools
    pub supports_tools: Option<bool>,
    /// Whether the model supports vision
    pub supports_images: Option<bool>,
    /// Whether to enable think mode
    pub supports_thinking: Option<bool>,
}
115
/// How long Ollama keeps a model loaded after the last request.
///
/// Deserialized untagged: a number becomes `Seconds`, a string becomes
/// `Duration`.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, JsonSchema, MergeFrom)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}
124
125impl KeepAlive {
126 /// Keep model alive until a new model is loaded or until Ollama shuts down
127 pub fn indefinite() -> Self {
128 Self::Seconds(-1)
129 }
130}
131
132impl Default for KeepAlive {
133 fn default() -> Self {
134 Self::indefinite()
135 }
136}
137
/// Settings for the LM Studio language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct LmStudioSettingsContent {
    /// Override for the LM Studio API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<LmStudioAvailableModel>>,
}
144
/// A user-declared model entry for the LM Studio provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LmStudioAvailableModel {
    /// The model's name in the LM Studio API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// Whether the model supports tool calls.
    pub supports_tool_calls: bool,
    /// Whether the model supports image input.
    pub supports_images: bool,
}
154
/// Settings for the DeepSeek language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct DeepseekSettingsContent {
    /// Override for the DeepSeek API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<DeepseekAvailableModel>>,
}
161
/// A user-declared model entry for the DeepSeek provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct DeepseekAvailableModel {
    /// The model's name in the DeepSeek API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
}
170
/// Settings for the Mistral language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct MistralSettingsContent {
    /// Override for the Mistral API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<MistralAvailableModel>>,
}
177
/// A user-declared model entry for the Mistral provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct MistralAvailableModel {
    /// The model's name in the Mistral API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model.
    pub max_completion_tokens: Option<u64>,
    /// Whether the model supports tool calls.
    pub supports_tools: Option<bool>,
    /// Whether the model supports image input.
    pub supports_images: Option<bool>,
    /// Whether the model supports thinking mode.
    pub supports_thinking: Option<bool>,
}
190
191#[with_fallible_options]
192#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
193pub struct OpenAiSettingsContent {
194 pub api_url: Option<String>,
195 pub available_models: Option<Vec<OpenAiAvailableModel>>,
196}
197
/// A user-declared model entry for the OpenAI provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiAvailableModel {
    /// The model's name in the OpenAI API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model.
    pub max_completion_tokens: Option<u64>,
    /// Reasoning effort to request from the model.
    pub reasoning_effort: Option<OpenAiReasoningEffort>,
}
208
/// Reasoning effort level for OpenAI models; serialized lowercase
/// ("minimal", "low", "medium", "high").
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum OpenAiReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}
217
/// Settings for a user-defined OpenAI-compatible provider.
///
/// Unlike the built-in providers, both fields are required: a custom
/// provider has no default URL or model list to fall back on.
#[with_fallible_options]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleSettingsContent {
    /// The provider's API base URL.
    pub api_url: String,
    /// The models available through this provider.
    pub available_models: Vec<OpenAiCompatibleAvailableModel>,
}
224
/// A model entry for an OpenAI-compatible provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleAvailableModel {
    /// The model's name in the provider's API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model.
    pub max_completion_tokens: Option<u64>,
    /// Capability flags; falls back to `OpenAiCompatibleModelCapabilities::default()`
    /// when omitted from settings.
    #[serde(default)]
    pub capabilities: OpenAiCompatibleModelCapabilities,
}
236
/// Capability flags for an OpenAI-compatible model.
///
/// See the manual `Default` impl below: only `tools` defaults to `true`.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenAiCompatibleModelCapabilities {
    /// Whether the model supports tool calls.
    pub tools: bool,
    /// Whether the model supports image input.
    pub images: bool,
    /// Whether the model supports parallel tool calls.
    pub parallel_tool_calls: bool,
    /// Whether the model supports a prompt cache key.
    pub prompt_cache_key: bool,
}
245
246impl Default for OpenAiCompatibleModelCapabilities {
247 fn default() -> Self {
248 Self {
249 tools: true,
250 images: false,
251 parallel_tool_calls: false,
252 prompt_cache_key: false,
253 }
254 }
255}
256
/// Settings for the Vercel language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct VercelSettingsContent {
    /// Override for the Vercel API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<VercelAvailableModel>>,
}
263
/// A user-declared model entry for the Vercel provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct VercelAvailableModel {
    /// The model's name in the Vercel API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model.
    pub max_completion_tokens: Option<u64>,
}
273
/// Settings for the Google language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct GoogleSettingsContent {
    /// Override for the Google API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<GoogleAvailableModel>>,
}
280
/// A user-declared model entry for the Google provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct GoogleAvailableModel {
    /// The model's name in the Google API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}
289
/// Settings for the xAI language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct XAiSettingsContent {
    /// Override for the xAI API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<XaiAvailableModel>>,
}
296
/// A user-declared model entry for the xAI provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct XaiAvailableModel {
    /// The model's name in the xAI API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model.
    pub max_completion_tokens: Option<u64>,
    /// Whether the model supports image input.
    pub supports_images: Option<bool>,
    /// Whether the model supports tool calls.
    pub supports_tools: Option<bool>,
    /// Whether the model supports parallel tool calls.
    pub parallel_tool_calls: Option<bool>,
}
309
/// Settings for the zed.dev hosted language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct ZedDotDevSettingsContent {
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<ZedDotDevAvailableModel>>,
}
315
/// A user-declared model entry for the zed.dev provider, which proxies to
/// one of several upstream providers (see `ZedDotDevAvailableProvider`).
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct ZedDotDevAvailableModel {
    /// The provider of the language model.
    pub provider: ZedDotDevAvailableProvider,
    /// The model's name in the provider's API. e.g. claude-3-5-sonnet-20240620
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    // NOTE(review): this is `usize` while every other provider's `max_tokens`
    // is `u64` — confirm whether the difference is intentional.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (o1-* only)
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")]
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
344
/// Upstream provider backing a zed.dev model; serialized lowercase
/// ("anthropic", "openai", "google").
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum ZedDotDevAvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}
352
/// Settings for the OpenRouter language model provider.
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct OpenRouterSettingsContent {
    /// Override for the OpenRouter API base URL.
    pub api_url: Option<String>,
    /// User-declared models available through this provider.
    pub available_models: Option<Vec<OpenRouterAvailableModel>>,
}
359
/// A user-declared model entry for the OpenRouter provider.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterAvailableModel {
    /// The model's name in the OpenRouter API.
    pub name: String,
    /// The model's name in Zed's UI.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model.
    pub max_completion_tokens: Option<u64>,
    /// Whether the model supports tool calls.
    pub supports_tools: Option<bool>,
    /// Whether the model supports image input.
    pub supports_images: Option<bool>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
    /// Upstream-provider routing preferences for this model.
    pub provider: Option<OpenRouterProvider>,
}
373
/// Upstream-provider routing preferences for an OpenRouter model.
///
/// NOTE(review): fields here are private, unlike the `pub` fields on every
/// other settings struct in this file — confirm whether that is intentional.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct OpenRouterProvider {
    order: Option<Vec<String>>,
    /// Defaults to `true` when omitted from settings.
    #[serde(default = "default_true")]
    allow_fallbacks: bool,
    /// Defaults to `false` when omitted from settings.
    #[serde(default)]
    require_parameters: bool,
    /// Defaults to `DataCollection::Allow` when omitted from settings.
    #[serde(default)]
    data_collection: DataCollection,
    only: Option<Vec<String>>,
    ignore: Option<Vec<String>>,
    quantizations: Option<Vec<String>>,
    sort: Option<String>,
}
389
/// Data-collection preference sent to OpenRouter; serialized lowercase
/// ("allow", "disallow"). Defaults to `Allow`.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
#[serde(rename_all = "lowercase")]
pub enum DataCollection {
    #[default]
    Allow,
    Disallow,
}
397
/// Serde default helper: returns `true`.
///
/// Referenced via `#[serde(default = "default_true")]` so that fields such as
/// `OpenRouterProvider::allow_fallbacks` default to `true` instead of
/// `bool::default()` (`false`) when omitted.
fn default_true() -> bool {
    true
}
401
/// Configuration for caching language model messages.
#[with_fallible_options]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
pub struct LanguageModelCacheConfiguration {
    /// Maximum number of cache anchors to use.
    pub max_cache_anchors: usize,
    /// Whether to speculatively populate the cache.
    pub should_speculate: bool,
    /// Minimum total token count before caching applies.
    // NOTE(review): singular `min_total_token` — presumably a token-count
    // threshold; confirm against the consumer of this setting.
    pub min_total_token: u64,
}
410
/// A model's operating mode; serialized internally tagged on `"type"`
/// ("default" or "thinking").
#[derive(
    Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom,
)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    /// Standard completion mode.
    #[default]
    Default,
    /// Extended thinking/reasoning mode.
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}