1pub mod predict_edits_v3;
2
3use std::str::FromStr;
4use std::sync::Arc;
5
6use anyhow::Context as _;
7use serde::{Deserialize, Serialize};
8use strum::{Display, EnumIter, EnumString};
9use uuid::Uuid;
10
/// The name of the header used to indicate which version of Zed the client is running.
pub const ZED_VERSION_HEADER_NAME: &str = "x-zed-version";

/// The name of the header used to indicate when a request failed due to an
/// expired LLM token.
///
/// The client may use this as a signal to refresh the token.
pub const EXPIRED_LLM_TOKEN_HEADER_NAME: &str = "x-zed-expired-token";

/// The name of the header used to indicate when a request failed due to an outdated LLM token.
///
/// A token is considered "outdated" when we can't parse the claims (e.g., after adding a new required claim).
///
/// This is distinct from [`EXPIRED_LLM_TOKEN_HEADER_NAME`] which indicates the token's time-based validity has passed.
/// An outdated token means the token's structure is incompatible with the current server expectations.
pub const OUTDATED_LLM_TOKEN_HEADER_NAME: &str = "x-zed-outdated-token";

/// The name of the header used to indicate the usage limit for edit predictions.
pub const EDIT_PREDICTIONS_USAGE_LIMIT_HEADER_NAME: &str = "x-zed-edit-predictions-usage-limit";

/// The name of the header used to indicate the usage amount for edit predictions.
pub const EDIT_PREDICTIONS_USAGE_AMOUNT_HEADER_NAME: &str = "x-zed-edit-predictions-usage-amount";

/// Header *value* identifying the edit predictions resource.
// NOTE(review): presumably sent as the value of a resource header alongside the
// usage headers above — confirm against call sites.
pub const EDIT_PREDICTIONS_RESOURCE_HEADER_VALUE: &str = "edit_predictions";

/// The name of the header used to indicate the minimum required Zed version.
///
/// This can be used to force a Zed upgrade in order to continue communicating
/// with the LLM service.
pub const MINIMUM_REQUIRED_VERSION_HEADER_NAME: &str = "x-zed-minimum-required-version";

/// The name of the header used by the client to indicate to the server that it supports receiving status messages.
pub const CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str =
    "x-zed-client-supports-status-messages";

/// The name of the header used by the client to indicate to the server that it supports receiving a "stream_ended" request completion status.
pub const CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME: &str =
    "x-zed-client-supports-stream-ended-request-completion-status";

/// The name of the header used by the server to indicate to the client that it supports sending status messages.
pub const SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str =
    "x-zed-server-supports-status-messages";

/// The name of the header used by the client to indicate that it supports receiving xAI models.
pub const CLIENT_SUPPORTS_X_AI_HEADER_NAME: &str = "x-zed-client-supports-x-ai";

/// The maximum number of edit predictions that can be rejected per request.
pub const MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST: usize = 100;
59
/// A usage limit: either a fixed number of allowed uses or unlimited.
///
/// Parsed from header values via its [`FromStr`] impl (`"unlimited"` or a
/// base-10 integer).
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum UsageLimit {
    /// At most the contained number of uses are allowed.
    Limited(i32),
    /// No limit.
    Unlimited,
}
66
67impl FromStr for UsageLimit {
68 type Err = anyhow::Error;
69
70 fn from_str(value: &str) -> Result<Self, Self::Err> {
71 match value {
72 "unlimited" => Ok(Self::Unlimited),
73 limit => limit
74 .parse::<i32>()
75 .map(Self::Limited)
76 .context("failed to parse limit"),
77 }
78 }
79}
80
/// Identifies which upstream model provider a request targets.
///
/// Both serde and strum conversions use snake_case, e.g. `OpenAi` <-> "open_ai".
#[derive(
    Debug, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, EnumString, EnumIter, Display,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum LanguageModelProvider {
    Anthropic,
    OpenAi,
    Google,
    XAi,
}
92
/// Request body for an edit prediction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsBody {
    // NOTE(review): presumably an outline of the buffer being edited — confirm at call sites.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub outline: Option<String>,
    /// Pre-serialized input events from the client.
    pub input_events: String,
    /// The excerpt of the buffer to predict edits for.
    pub input_excerpt: String,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub speculated_output: Option<String>,
    /// Whether the user provided consent for sampling this interaction.
    // The alias preserves compatibility with the older field name on the wire.
    #[serde(default, alias = "data_collection_permission")]
    pub can_collect_data: bool,
    /// Diagnostic groups as (name, payload) pairs, when available.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub diagnostic_groups: Option<Vec<(String, serde_json::Value)>>,
    /// Info about the git repository state, only present when can_collect_data is true.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub git_info: Option<PredictEditsGitInfo>,
    /// The trigger for this request.
    #[serde(default)]
    pub trigger: PredictEditsRequestTrigger,
}
113
/// What triggered an edit prediction request.
// NOTE(review): no `#[serde(rename_all)]` here, so variants serialize in
// CamelCase ("Testing", "Cli", …), unlike most enums in this file — confirm
// this matches the wire format before changing it.
#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PredictEditsRequestTrigger {
    Testing,
    Diagnostics,
    Cli,
    /// Fallback trigger; also the default when the field is absent.
    #[default]
    Other,
}
122
/// Git repository state sent with a prediction request, only when the user
/// consented to data collection (see [`PredictEditsBody::can_collect_data`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsGitInfo {
    /// SHA of git HEAD commit at time of prediction.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub head_sha: Option<String>,
    /// URL of the remote called `origin`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub remote_origin_url: Option<String>,
    /// URL of the remote called `upstream`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub remote_upstream_url: Option<String>,
}
135
/// Response to a [`PredictEditsBody`] request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsResponse {
    /// Identifier for this prediction — presumably echoed back via
    /// [`AcceptEditPredictionBody`] / [`EditPredictionRejection`]; confirm at call sites.
    pub request_id: String,
    // NOTE(review): assumed to be the predicted replacement for the request's
    // `input_excerpt` — confirm.
    pub output_excerpt: String,
}
141
/// Request body sent when an edit prediction is accepted.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AcceptEditPredictionBody {
    /// Id of the prediction request being accepted.
    pub request_id: String,
    /// Version of the model that produced the prediction, when known.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_version: Option<String>,
}
148
/// Request body reporting a batch of rejected edit predictions (receiving side).
#[derive(Debug, Clone, Deserialize)]
pub struct RejectEditPredictionsBody {
    pub rejections: Vec<EditPredictionRejection>,
}

/// Borrowed counterpart of [`RejectEditPredictionsBody`] for the sending side —
/// serializes a slice of rejections without cloning them.
#[derive(Debug, Clone, Serialize)]
pub struct RejectEditPredictionsBodyRef<'a> {
    pub rejections: &'a [EditPredictionRejection],
}
158
/// A single rejected edit prediction.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct EditPredictionRejection {
    /// Id of the prediction request that was rejected.
    pub request_id: String,
    /// Why the prediction was rejected. Defaults to
    /// [`EditPredictionRejectReason::Discarded`] when absent.
    #[serde(default)]
    pub reason: EditPredictionRejectReason,
    /// Whether the prediction was shown before being rejected.
    pub was_shown: bool,
    /// Version of the model that produced the prediction, when known.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_version: Option<String>,
}
168
/// The reason an edit prediction was rejected.
// NOTE(review): no `#[serde(rename_all)]`, so variants serialize in CamelCase
// ("Canceled", "Empty", …) — confirm this matches the wire format before
// adding one.
#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum EditPredictionRejectReason {
    /// New requests were triggered before this one completed
    Canceled,
    /// No edits returned
    Empty,
    /// Edits returned, but none remained after interpolation
    InterpolatedEmpty,
    /// The new prediction was preferred over the current one
    Replaced,
    /// The current prediction was preferred over the new one
    CurrentPreferred,
    /// The current prediction was discarded
    #[default]
    Discarded,
    /// The current prediction was explicitly rejected by the user
    Rejected,
}
187
/// The high-level user action that triggered a completion request.
///
/// Serialized in snake_case, e.g. `UserPrompt` <-> "user_prompt".
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionIntent {
    UserPrompt,
    ToolResults,
    ThreadSummarization,
    ThreadContextSummarization,
    CreateFile,
    EditFile,
    InlineAssist,
    TerminalInlineAssist,
    GenerateGitCommitMessage,
}
201
/// Request body for a model completion.
#[derive(Debug, Serialize, Deserialize)]
pub struct CompletionBody {
    /// Id of the thread this completion belongs to, when applicable.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub thread_id: Option<String>,
    /// Id of the prompt within the thread, when applicable.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub prompt_id: Option<String>,
    /// The user action that triggered this request.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub intent: Option<CompletionIntent>,
    /// Which upstream provider should serve the request.
    pub provider: LanguageModelProvider,
    /// Provider-specific model identifier.
    pub model: String,
    /// Raw request payload for the provider's API.
    // NOTE(review): assumed to be forwarded to the provider — confirm server-side.
    pub provider_request: serde_json::Value,
}
214
/// Status of a completion request, reported by the cloud service
/// (see [`CompletionEvent::Status`]).
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionRequestStatus {
    /// The request is waiting in a queue at the given position.
    Queued {
        position: usize,
    },
    /// The request has started.
    Started,
    /// The request failed.
    Failed {
        code: String,
        message: String,
        request_id: Uuid,
        /// Retry duration in seconds.
        retry_after: Option<f64>,
    },
    /// The cloud sends a StreamEnded message when the stream from the LLM provider finishes.
    StreamEnded,
    /// Fallback for status variants this client version doesn't know about.
    #[serde(other)]
    Unknown,
}
234
/// An item in a completion stream: either a status update from the cloud
/// service or a payload of type `T`.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionEvent<T> {
    /// A status update from the cloud service.
    Status(CompletionRequestStatus),
    /// A payload event.
    Event(T),
}
241
242impl<T> CompletionEvent<T> {
243 pub fn into_status(self) -> Option<CompletionRequestStatus> {
244 match self {
245 Self::Status(status) => Some(status),
246 Self::Event(_) => None,
247 }
248 }
249
250 pub fn into_event(self) -> Option<T> {
251 match self {
252 Self::Event(event) => Some(event),
253 Self::Status(_) => None,
254 }
255 }
256}
257
/// Request body for a web search.
#[derive(Serialize, Deserialize)]
pub struct WebSearchBody {
    pub query: String,
}

/// Response to a [`WebSearchBody`] request.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSearchResponse {
    pub results: Vec<WebSearchResult>,
}

/// A single web search result.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSearchResult {
    pub title: String,
    pub url: String,
    pub text: String,
}
274
/// Request body for counting the tokens of a provider request.
#[derive(Serialize, Deserialize)]
pub struct CountTokensBody {
    /// Which upstream provider's model to count tokens for.
    pub provider: LanguageModelProvider,
    /// Provider-specific model identifier.
    pub model: String,
    /// Raw request payload whose tokens should be counted.
    pub provider_request: serde_json::Value,
}

/// Response to a [`CountTokensBody`] request.
#[derive(Serialize, Deserialize)]
pub struct CountTokensResponse {
    pub tokens: usize,
}
286
/// Identifier for a language model, stored as a cheaply clonable shared string.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelId(pub Arc<str>);
289
290impl std::fmt::Display for LanguageModelId {
291 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
292 write!(f, "{}", self.0)
293 }
294}
295
/// Metadata describing a language model available through the cloud service.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LanguageModel {
    pub provider: LanguageModelProvider,
    pub id: LanguageModelId,
    /// Display name for the model.
    pub display_name: String,
    /// Whether this is the latest version of the model; defaults to `false`.
    #[serde(default)]
    pub is_latest: bool,
    /// Maximum token count for the model.
    pub max_token_count: usize,
    /// Maximum token count in "max mode", if the model has one.
    pub max_token_count_in_max_mode: Option<usize>,
    pub max_output_tokens: usize,
    pub supports_tools: bool,
    pub supports_images: bool,
    pub supports_thinking: bool,
    #[serde(default)]
    pub supports_fast_mode: bool,
    /// Effort levels the model supports, if any.
    pub supported_effort_levels: Vec<SupportedEffortLevel>,
    #[serde(default)]
    pub supports_streaming_tools: bool,
    /// Only used by OpenAI and xAI.
    #[serde(default)]
    pub supports_parallel_tool_calls: bool,
}
318
/// An effort level supported by a model (see
/// [`LanguageModel::supported_effort_levels`]).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SupportedEffortLevel {
    /// Label for this effort level.
    pub name: Arc<str>,
    // NOTE(review): assumed to be the provider-facing value for this effort
    // level — confirm at call sites.
    pub value: Arc<str>,
    /// Whether this is the default effort level, when known.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_default: Option<bool>,
}
326
/// Response listing the available language models.
#[derive(Debug, Serialize, Deserialize)]
pub struct ListModelsResponse {
    pub models: Vec<LanguageModel>,
    /// Id of the default model, if one is configured.
    pub default_model: Option<LanguageModelId>,
    /// Id of the default fast model, if one is configured.
    pub default_fast_model: Option<LanguageModelId>,
    /// Ids of recommended models.
    pub recommended_models: Vec<LanguageModelId>,
}
334
/// Current usage for the metered resources tracked by the service.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct CurrentUsage {
    pub edit_predictions: UsageData,
}

/// Usage of a single metered resource: the amount consumed and the limit it is
/// measured against.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageData {
    pub used: u32,
    pub limit: UsageLimit,
}
345
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_usage_limit_from_str() {
        // The "unlimited" sentinel.
        let limit = UsageLimit::from_str("unlimited").unwrap();
        assert!(matches!(limit, UsageLimit::Unlimited));

        // Non-negative integers parse as fixed limits.
        let limit = UsageLimit::from_str(&0.to_string()).unwrap();
        assert!(matches!(limit, UsageLimit::Limited(0)));

        let limit = UsageLimit::from_str(&50.to_string()).unwrap();
        assert!(matches!(limit, UsageLimit::Limited(50)));

        // `i32::from_str` accepts signed input, so these are accepted too;
        // pin that behavior so a change to it is deliberate.
        let limit = UsageLimit::from_str("-1").unwrap();
        assert!(matches!(limit, UsageLimit::Limited(-1)));
        let limit = UsageLimit::from_str("+10").unwrap();
        assert!(matches!(limit, UsageLimit::Limited(10)));

        // Everything else is an error: junk, trailing garbage, empty input,
        // surrounding whitespace, and case variants of the sentinel.
        for value in ["not_a_number", "50xyz", "", " 50", "Unlimited"] {
            assert!(
                UsageLimit::from_str(value).is_err(),
                "expected error for {value:?}"
            );
        }
    }
}