1pub mod predict_edits_v3;
2
3use std::str::FromStr;
4use std::sync::Arc;
5
6use anyhow::Context as _;
7use serde::{Deserialize, Serialize};
8use strum::{Display, EnumIter, EnumString};
9use uuid::Uuid;
10
/// The name of the header used to indicate which version of Zed the client is running.
pub const ZED_VERSION_HEADER_NAME: &str = "x-zed-version";

/// The name of the header used to indicate when a request failed due to an
/// expired LLM token.
///
/// The client may use this as a signal to refresh the token.
pub const EXPIRED_LLM_TOKEN_HEADER_NAME: &str = "x-zed-expired-token";

/// The name of the header used to indicate when a request failed due to an outdated LLM token.
///
/// A token is considered "outdated" when we can't parse the claims (e.g., after adding a new required claim).
///
/// This is distinct from [`EXPIRED_LLM_TOKEN_HEADER_NAME`] which indicates the token's time-based validity has passed.
/// An outdated token means the token's structure is incompatible with the current server expectations.
pub const OUTDATED_LLM_TOKEN_HEADER_NAME: &str = "x-zed-outdated-token";

/// The name of the header used to indicate the usage limit for edit predictions.
pub const EDIT_PREDICTIONS_USAGE_LIMIT_HEADER_NAME: &str = "x-zed-edit-predictions-usage-limit";

/// The name of the header used to indicate the usage amount for edit predictions.
pub const EDIT_PREDICTIONS_USAGE_AMOUNT_HEADER_NAME: &str = "x-zed-edit-predictions-usage-amount";

/// Header *value* identifying the edit-predictions resource.
// NOTE(review): unlike its neighbors this is a value, not a header name —
// presumably paired with a resource-identifying header; confirm at call sites.
pub const EDIT_PREDICTIONS_RESOURCE_HEADER_VALUE: &str = "edit_predictions";

/// The name of the header used to indicate the minimum required Zed version.
///
/// This can be used to force a Zed upgrade in order to continue communicating
/// with the LLM service.
pub const MINIMUM_REQUIRED_VERSION_HEADER_NAME: &str = "x-zed-minimum-required-version";

/// The name of the header used by the client to indicate to the server that it supports receiving status messages.
pub const CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str =
    "x-zed-client-supports-status-messages";

/// The name of the header used by the client to indicate to the server that it supports receiving a "stream_ended" request completion status.
pub const CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME: &str =
    "x-zed-client-supports-stream-ended-request-completion-status";

/// The name of the header used by the server to indicate to the client that it supports sending status messages.
pub const SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str =
    "x-zed-server-supports-status-messages";

/// The name of the header used by the client to indicate that it supports receiving xAI models.
pub const CLIENT_SUPPORTS_X_AI_HEADER_NAME: &str = "x-zed-client-supports-x-ai";

/// The maximum number of edit predictions that can be rejected per request.
pub const MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST: usize = 100;
59
/// A usage cap, either bounded by a concrete count or unlimited.
///
/// Serialized in snake_case; also parseable from a header string via [`FromStr`]
/// ("unlimited" or a base-10 integer).
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum UsageLimit {
    /// Usage is capped at the contained count.
    Limited(i32),
    /// No cap on usage.
    Unlimited,
}
66
67impl FromStr for UsageLimit {
68 type Err = anyhow::Error;
69
70 fn from_str(value: &str) -> Result<Self, Self::Err> {
71 match value {
72 "unlimited" => Ok(Self::Unlimited),
73 limit => limit
74 .parse::<i32>()
75 .map(Self::Limited)
76 .context("failed to parse limit"),
77 }
78 }
79}
80
/// The upstream providers whose models the service can route requests to.
///
/// Serialized in snake_case both for serde and for strum's string conversions,
/// so `"open_ai"` round-trips through [`FromStr`]/`Display`.
#[derive(
    Debug, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, EnumString, EnumIter, Display,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum LanguageModelProvider {
    Anthropic,
    OpenAi,
    Google,
    XAi,
}
92
/// Request body for an edit-prediction request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsBody {
    /// Optional outline of the file being edited.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub outline: Option<String>,
    /// Serialized record of the user's recent input events.
    pub input_events: String,
    /// The excerpt of the buffer around the cursor to predict edits for.
    pub input_excerpt: String,
    /// A client-side guess at the output, if any.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub speculated_output: Option<String>,
    /// Whether the user provided consent for sampling this interaction.
    // The alias preserves compatibility with older clients that sent
    // `data_collection_permission`.
    #[serde(default, alias = "data_collection_permission")]
    pub can_collect_data: bool,
    /// Diagnostic groups as (language server name, serialized diagnostics) pairs.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub diagnostic_groups: Option<Vec<(String, serde_json::Value)>>,
    /// Info about the git repository state, only present when can_collect_data is true.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub git_info: Option<PredictEditsGitInfo>,
    /// The trigger for this request.
    #[serde(default)]
    pub trigger: PredictEditsRequestTrigger,
}
113
/// What caused an edit-prediction request to be issued.
// NOTE(review): unlike most enums in this file there is no
// `#[serde(rename_all = "snake_case")]`, so variants serialize as
// "Testing", "Other", etc. — confirm this PascalCase wire format is what
// the server expects before "fixing" it.
#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PredictEditsRequestTrigger {
    /// Issued by automated testing.
    Testing,
    /// Issued in response to diagnostics changing.
    Diagnostics,
    /// Issued from the CLI.
    Cli,
    /// Any other trigger (the default when the field is absent).
    #[default]
    Other,
}
122
/// Git repository state attached to an edit-prediction request
/// (only sent when the user consented to data collection).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsGitInfo {
    /// SHA of git HEAD commit at time of prediction.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub head_sha: Option<String>,
    /// URL of the remote called `origin`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub remote_origin_url: Option<String>,
    /// URL of the remote called `upstream`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub remote_upstream_url: Option<String>,
}
135
/// Response body for an edit-prediction request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsResponse {
    /// Server-assigned id for this request; echoed back in
    /// accept/reject reports.
    pub request_id: String,
    /// The predicted replacement excerpt.
    pub output_excerpt: String,
}
141
/// Request body reporting that the user accepted an edit prediction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AcceptEditPredictionBody {
    /// The id the server assigned to the original prediction request.
    pub request_id: String,
    /// Model version that produced the prediction, when known.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_version: Option<String>,
    /// End-to-end latency of the prediction in milliseconds, when measured.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub e2e_latency_ms: Option<u128>,
}
150
/// Request body reporting a batch of rejected edit predictions.
///
/// Server-side deserialization counterpart of [`RejectEditPredictionsBodyRef`];
/// batches are bounded by [`MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST`].
#[derive(Debug, Clone, Deserialize)]
pub struct RejectEditPredictionsBody {
    pub rejections: Vec<EditPredictionRejection>,
}
155
/// Borrowed, serialize-only counterpart of [`RejectEditPredictionsBody`],
/// letting the client serialize a slice of rejections without cloning them.
#[derive(Debug, Clone, Serialize)]
pub struct RejectEditPredictionsBodyRef<'a> {
    pub rejections: &'a [EditPredictionRejection],
}
160
/// A single rejected edit prediction and why it was rejected.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct EditPredictionRejection {
    /// The id the server assigned to the original prediction request.
    pub request_id: String,
    /// Why the prediction was rejected (defaults to `Discarded` when absent).
    #[serde(default)]
    pub reason: EditPredictionRejectReason,
    /// Whether the prediction was ever displayed to the user.
    pub was_shown: bool,
    /// Model version that produced the prediction, when known.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_version: Option<String>,
    /// End-to-end latency of the prediction in milliseconds, when measured.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub e2e_latency_ms: Option<u128>,
}
172
/// Why an edit prediction was rejected.
// NOTE(review): no `#[serde(rename_all = "snake_case")]`, so variants
// serialize in PascalCase ("Canceled", ...) — confirm this matches the
// server's wire format before changing it.
#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum EditPredictionRejectReason {
    /// New requests were triggered before this one completed
    Canceled,
    /// No edits returned
    Empty,
    /// Edits returned, but none remained after interpolation
    InterpolatedEmpty,
    /// The new prediction was preferred over the current one
    Replaced,
    /// The current prediction was preferred over the new one
    CurrentPreferred,
    /// The current prediction was discarded
    #[default]
    Discarded,
    /// The current prediction was explicitly rejected by the user
    Rejected,
}
191
/// The high-level purpose of a completion request, used by the server to
/// attribute/route the request. Serialized in snake_case.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionIntent {
    UserPrompt,
    ToolResults,
    ThreadSummarization,
    ThreadContextSummarization,
    CreateFile,
    EditFile,
    InlineAssist,
    TerminalInlineAssist,
    GenerateGitCommitMessage,
}
205
/// Request body for a model completion.
#[derive(Debug, Serialize, Deserialize)]
pub struct CompletionBody {
    /// Id of the thread this completion belongs to, if any.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub thread_id: Option<String>,
    /// Id of the prompt within the thread, if any.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub prompt_id: Option<String>,
    /// What the completion is for, if known.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub intent: Option<CompletionIntent>,
    /// Which upstream provider should serve the request.
    pub provider: LanguageModelProvider,
    /// Provider-specific model identifier.
    pub model: String,
    /// The provider-native request payload, passed through opaquely.
    pub provider_request: serde_json::Value,
}
218
/// Status updates streamed to the client alongside completion events.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionRequestStatus {
    /// The request is waiting in the queue at the given position.
    Queued {
        position: usize,
    },
    /// The request has started streaming from the provider.
    Started,
    /// The request failed.
    Failed {
        code: String,
        message: String,
        request_id: Uuid,
        /// Retry duration in seconds.
        retry_after: Option<f64>,
    },
    /// The cloud sends a StreamEnded message when the stream from the LLM provider finishes.
    StreamEnded,
    /// Fallback for status variants this client version doesn't know about,
    /// so newer servers don't break deserialization.
    #[serde(other)]
    Unknown,
}
238
/// One item in a completion stream: either a request status update or a
/// provider-specific event payload `T`.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionEvent<T> {
    /// A status update about the request itself.
    Status(CompletionRequestStatus),
    /// A provider event payload.
    Event(T),
}
245
246impl<T> CompletionEvent<T> {
247 pub fn into_status(self) -> Option<CompletionRequestStatus> {
248 match self {
249 Self::Status(status) => Some(status),
250 Self::Event(_) => None,
251 }
252 }
253
254 pub fn into_event(self) -> Option<T> {
255 match self {
256 Self::Event(event) => Some(event),
257 Self::Status(_) => None,
258 }
259 }
260}
261
/// Request body for a web search.
#[derive(Serialize, Deserialize)]
pub struct WebSearchBody {
    /// The search query string.
    pub query: String,
}
266
/// Response body for a web search.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSearchResponse {
    pub results: Vec<WebSearchResult>,
}
271
/// A single web search result.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSearchResult {
    /// Title of the result page.
    pub title: String,
    /// URL of the result page.
    pub url: String,
    /// Extracted text content of the result.
    pub text: String,
}
278
/// Request body for counting the tokens of a provider-native request.
#[derive(Serialize, Deserialize)]
pub struct CountTokensBody {
    /// Which upstream provider's tokenizer to use.
    pub provider: LanguageModelProvider,
    /// Provider-specific model identifier.
    pub model: String,
    /// The provider-native request payload, passed through opaquely.
    pub provider_request: serde_json::Value,
}
285
/// Response body for a token-count request.
#[derive(Serialize, Deserialize)]
pub struct CountTokensResponse {
    /// Number of tokens the request would consume.
    pub tokens: usize,
}
290
/// Newtype identifier for a language model; `Arc<str>` makes clones cheap
/// (refcount bump, no string copy).
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelId(pub Arc<str>);
293
294impl std::fmt::Display for LanguageModelId {
295 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
296 write!(f, "{}", self.0)
297 }
298}
299
/// Metadata describing one language model available through the service.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LanguageModel {
    /// The upstream provider serving this model.
    pub provider: LanguageModelProvider,
    /// Stable identifier for the model.
    pub id: LanguageModelId,
    /// Human-readable name shown in UI.
    pub display_name: String,
    /// Whether this is the latest revision of the model.
    #[serde(default)]
    pub is_latest: bool,
    /// Maximum context window in tokens.
    pub max_token_count: usize,
    /// Larger context window available in "max mode", when the model has one.
    pub max_token_count_in_max_mode: Option<usize>,
    /// Maximum number of output tokens per completion.
    pub max_output_tokens: usize,
    pub supports_tools: bool,
    pub supports_images: bool,
    pub supports_thinking: bool,
    #[serde(default)]
    pub supports_fast_mode: bool,
    /// Reasoning-effort levels the model accepts, if any.
    pub supported_effort_levels: Vec<SupportedEffortLevel>,
    #[serde(default)]
    pub supports_streaming_tools: bool,
    /// Only used by OpenAI and xAI.
    #[serde(default)]
    pub supports_parallel_tool_calls: bool,
}
322
/// A reasoning-effort level supported by a model.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SupportedEffortLevel {
    /// Human-readable name of the level.
    pub name: Arc<str>,
    /// The value sent to the provider for this level.
    pub value: Arc<str>,
    /// Whether this level is the model's default, when specified.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_default: Option<bool>,
}
330
/// Response body listing the models available to the client.
#[derive(Debug, Serialize, Deserialize)]
pub struct ListModelsResponse {
    /// All available models.
    pub models: Vec<LanguageModel>,
    /// Id of the default model, if one is designated.
    pub default_model: Option<LanguageModelId>,
    /// Id of the default "fast" model, if one is designated.
    pub default_fast_model: Option<LanguageModelId>,
    /// Ids of models recommended to the user.
    pub recommended_models: Vec<LanguageModelId>,
}
338
/// The user's current usage across metered features.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct CurrentUsage {
    /// Usage and limit for edit predictions.
    pub edit_predictions: UsageData,
}
343
/// Amount used so far together with the applicable limit.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageData {
    /// Units consumed so far in the current period.
    pub used: u32,
    /// The cap this usage is measured against.
    pub limit: UsageLimit,
}
349
#[cfg(test)]
mod tests {
    use super::*;

    /// `UsageLimit::from_str` accepts "unlimited" and base-10 integers,
    /// and rejects everything else.
    #[test]
    fn test_usage_limit_from_str() {
        assert_eq!(
            UsageLimit::from_str("unlimited").unwrap(),
            UsageLimit::Unlimited
        );
        assert_eq!(UsageLimit::from_str("0").unwrap(), UsageLimit::Limited(0));
        assert_eq!(UsageLimit::from_str("50").unwrap(), UsageLimit::Limited(50));

        for invalid in ["not_a_number", "50xyz"] {
            assert!(UsageLimit::from_str(invalid).is_err());
        }
    }
}