use collections::HashMap;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_with::skip_serializing_none;

use std::sync::Arc;

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct AllLanguageModelSettingsContent {
    pub anthropic: Option<AnthropicSettingsContent>,
    pub bedrock: Option<AmazonBedrockSettingsContent>,
    pub deepseek: Option<DeepseekSettingsContent>,
    pub google: Option<GoogleSettingsContent>,
    pub lmstudio: Option<LmStudioSettingsContent>,
    pub mistral: Option<MistralSettingsContent>,
    pub ollama: Option<OllamaSettingsContent>,
    pub open_router: Option<OpenRouterSettingsContent>,
    pub openai: Option<OpenAiSettingsContent>,
    pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
    pub vercel: Option<VercelSettingsContent>,
    pub x_ai: Option<XAiSettingsContent>,
    #[serde(rename = "zed.dev")]
    pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
}
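
// A minimal round-trip sketch of how this struct maps to the settings JSON,
// assuming `serde_json` is available as a dev-dependency. It shows the two
// serde details above: `#[skip_serializing_none]` drops unset providers on
// write, and the rename means the JSON key is "zed.dev", not "zed_dot_dev".
#[cfg(test)]
mod all_settings_serde_sketch {
    use super::*;

    #[test]
    fn round_trips_renamed_and_optional_fields() {
        let json = r#"{ "zed.dev": {} }"#;
        let settings: AllLanguageModelSettingsContent = serde_json::from_str(json).unwrap();
        assert!(settings.zed_dot_dev.is_some());
        assert!(settings.anthropic.is_none());
        // Providers left as `None` are omitted entirely on the way back out.
        let out = serde_json::to_string(&settings).unwrap();
        assert_eq!(out, r#"{"zed.dev":{}}"#);
    }
}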

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct AnthropicSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<AnthropicAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AnthropicAvailableModel {
    /// The model's name in the Anthropic API, e.g. `claude-3-5-sonnet-latest` or `claude-3-opus-20240229`.
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The model's context window size.
    pub max_tokens: u64,
    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
    pub tool_override: Option<String>,
    /// Configuration of Anthropic's caching API.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    pub max_output_tokens: Option<u64>,
    pub default_temperature: Option<f32>,
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
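
// A hedged sketch (values illustrative, assuming `serde_json`) of a minimal
// settings entry for a custom Anthropic model: `extra_beta_headers` may be
// omitted thanks to `#[serde(default)]`, and the optional fields fall back
// to `None`.
#[cfg(test)]
mod anthropic_model_serde_sketch {
    use super::*;

    #[test]
    fn parses_a_minimal_model_entry() {
        // `max_tokens` here is an illustrative figure, not a canonical value.
        let json = r#"{
            "name": "claude-3-5-sonnet-latest",
            "display_name": "Claude 3.5 Sonnet",
            "max_tokens": 200000
        }"#;
        let model: AnthropicAvailableModel = serde_json::from_str(json).unwrap();
        assert_eq!(model.name, "claude-3-5-sonnet-latest");
        assert!(model.extra_beta_headers.is_empty());
        assert!(model.mode.is_none());
    }
}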

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct AmazonBedrockSettingsContent {
    pub available_models: Option<Vec<BedrockAvailableModel>>,
    pub endpoint_url: Option<String>,
    pub region: Option<String>,
    pub profile: Option<String>,
    pub authentication_method: Option<BedrockAuthMethodContent>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct BedrockAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    pub max_output_tokens: Option<u64>,
    pub default_temperature: Option<f32>,
    pub mode: Option<ModelMode>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub enum BedrockAuthMethodContent {
    #[serde(rename = "named_profile")]
    NamedProfile,
    #[serde(rename = "sso")]
    SingleSignOn,
    /// IMDSv2, PodIdentity, env vars, etc.
    #[serde(rename = "default")]
    Automatic,
}
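
// A short sketch (assuming `serde_json`) of the wire values above: the serde
// renames mean configs use "named_profile", "sso", and "default" rather than
// the Rust variant names.
#[cfg(test)]
mod bedrock_auth_serde_sketch {
    use super::*;

    #[test]
    fn uses_renamed_variant_strings() {
        let sso: BedrockAuthMethodContent = serde_json::from_str("\"sso\"").unwrap();
        assert_eq!(sso, BedrockAuthMethodContent::SingleSignOn);
        let out = serde_json::to_string(&BedrockAuthMethodContent::Automatic).unwrap();
        assert_eq!(out, "\"default\"");
    }
}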

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct OllamaSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OllamaAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct OllamaAvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The context length passed to the model (aka `num_ctx` or `n_ctx`)
    pub max_tokens: u64,
    /// How long to keep the model loaded in memory after the last request
    pub keep_alive: Option<KeepAlive>,
    /// Whether the model supports tools
    pub supports_tools: Option<bool>,
    /// Whether the model supports vision
    pub supports_images: Option<bool>,
    /// Whether the model supports thinking
    pub supports_thinking: Option<bool>,
}

#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq, JsonSchema)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}

impl KeepAlive {
    /// Keep model alive until a new model is loaded or until Ollama shuts down
    pub fn indefinite() -> Self {
        Self::Seconds(-1)
    }
}

impl Default for KeepAlive {
    fn default() -> Self {
        Self::indefinite()
    }
}
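
// A sketch of the untagged `KeepAlive` representation (assuming
// `serde_json`): bare integers become `Seconds`, strings become `Duration`,
// and the default is the `-1` "keep loaded indefinitely" sentinel.
#[cfg(test)]
mod keep_alive_serde_sketch {
    use super::*;

    #[test]
    fn accepts_integers_and_duration_strings() {
        let seconds: KeepAlive = serde_json::from_str("300").unwrap();
        assert_eq!(seconds, KeepAlive::Seconds(300));
        let duration: KeepAlive = serde_json::from_str("\"10m\"").unwrap();
        assert_eq!(duration, KeepAlive::Duration("10m".into()));
        assert_eq!(KeepAlive::default(), KeepAlive::Seconds(-1));
    }
}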

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct LmStudioSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<LmStudioAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LmStudioAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub supports_tool_calls: bool,
    pub supports_images: bool,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct DeepseekSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<DeepseekAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct DeepseekAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct MistralSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<MistralAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct MistralAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_tools: Option<bool>,
    pub supports_images: Option<bool>,
    pub supports_thinking: Option<bool>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct OpenAiSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OpenAiAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct OpenAiAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub reasoning_effort: Option<OpenAiReasoningEffort>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum OpenAiReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}

#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct OpenAiCompatibleSettingsContent {
    pub api_url: String,
    pub available_models: Vec<OpenAiCompatibleAvailableModel>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct OpenAiCompatibleAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    #[serde(default)]
    pub capabilities: OpenAiCompatibleModelCapabilities,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct OpenAiCompatibleModelCapabilities {
    pub tools: bool,
    pub images: bool,
    pub parallel_tool_calls: bool,
    pub prompt_cache_key: bool,
}

impl Default for OpenAiCompatibleModelCapabilities {
    fn default() -> Self {
        Self {
            tools: true,
            images: false,
            parallel_tool_calls: false,
            prompt_cache_key: false,
        }
    }
}
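
// A minimal sketch (model name hypothetical, assuming `serde_json`) of how
// `#[serde(default)]` on `capabilities` combines with the hand-written
// `Default` impl: an entry that omits the field gets tool support enabled
// and everything else disabled.
#[cfg(test)]
mod openai_compatible_defaults_sketch {
    use super::*;

    #[test]
    fn omitted_capabilities_fall_back_to_defaults() {
        // "my-model" is a hypothetical entry used only for this sketch.
        let json = r#"{ "name": "my-model", "max_tokens": 32768 }"#;
        let model: OpenAiCompatibleAvailableModel = serde_json::from_str(json).unwrap();
        assert!(model.capabilities.tools);
        assert!(!model.capabilities.images);
        assert!(!model.capabilities.parallel_tool_calls);
    }
}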

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct VercelSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<VercelAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct VercelAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct GoogleSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<GoogleAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct GoogleAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub mode: Option<ModelMode>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct XAiSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<XaiAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct XaiAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct ZedDotDevSettingsContent {
    pub available_models: Option<Vec<ZedDotDevAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct ZedDotDevAvailableModel {
    /// The provider of the language model.
    pub provider: ZedDotDevAvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (`o1-*` only)
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum ZedDotDevAvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[skip_serializing_none]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
pub struct OpenRouterSettingsContent {
    pub api_url: Option<String>,
    pub available_models: Option<Vec<OpenRouterAvailableModel>>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct OpenRouterAvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
    pub supports_tools: Option<bool>,
    pub supports_images: Option<bool>,
    pub mode: Option<ModelMode>,
    pub provider: Option<OpenRouterProvider>,
}

#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct OpenRouterProvider {
    order: Option<Vec<String>>,
    #[serde(default = "default_true")]
    allow_fallbacks: bool,
    #[serde(default)]
    require_parameters: bool,
    #[serde(default)]
    data_collection: DataCollection,
    only: Option<Vec<String>>,
    ignore: Option<Vec<String>>,
    quantizations: Option<Vec<String>>,
    sort: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum DataCollection {
    Allow,
    Disallow,
}

impl Default for DataCollection {
    fn default() -> Self {
        Self::Allow
    }
}

fn default_true() -> bool {
    true
}
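
// A sketch (assuming `serde_json`) of the per-field defaults above: an empty
// provider object picks up `allow_fallbacks = true` via `default_true` and
// `DataCollection::Allow` via its `Default` impl.
#[cfg(test)]
mod open_router_provider_defaults_sketch {
    use super::*;

    #[test]
    fn empty_object_uses_field_defaults() {
        let provider: OpenRouterProvider = serde_json::from_str("{}").unwrap();
        assert!(provider.allow_fallbacks);
        assert!(!provider.require_parameters);
        assert_eq!(provider.data_collection, DataCollection::Allow);
    }
}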

/// Configuration for caching language model messages.
#[skip_serializing_none]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: u64,
}

#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}
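
// A sketch of the internally tagged `ModelMode` encoding (assuming
// `serde_json`): the `type` field selects the variant, and `budget_tokens`
// sits alongside it for `thinking`.
#[cfg(test)]
mod model_mode_serde_sketch {
    use super::*;

    #[test]
    fn parses_tagged_thinking_mode() {
        let mode: ModelMode =
            serde_json::from_str(r#"{ "type": "thinking", "budget_tokens": 4096 }"#).unwrap();
        assert_eq!(mode, ModelMode::Thinking { budget_tokens: Some(4096) });
        let default: ModelMode = serde_json::from_str(r#"{ "type": "default" }"#).unwrap();
        assert_eq!(default, ModelMode::Default);
    }
}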