1pub mod predict_edits_v3;
2
3use std::str::FromStr;
4use std::sync::Arc;
5
6use anyhow::Context as _;
7use serde::{Deserialize, Serialize};
8use strum::{Display, EnumIter, EnumString};
9use uuid::Uuid;
10
/// The name of the header used to indicate which version of Zed the client is running.
pub const ZED_VERSION_HEADER_NAME: &str = "x-zed-version";

/// The name of the header used to indicate when a request failed due to an
/// expired LLM token.
///
/// The client may use this as a signal to refresh the token.
pub const EXPIRED_LLM_TOKEN_HEADER_NAME: &str = "x-zed-expired-token";

/// The name of the header used to indicate when a request failed due to an outdated LLM token.
///
/// A token is considered "outdated" when we can't parse the claims (e.g., after adding a new required claim).
///
/// This is distinct from [`EXPIRED_LLM_TOKEN_HEADER_NAME`] which indicates the token's time-based validity has passed.
/// An outdated token means the token's structure is incompatible with the current server expectations.
pub const OUTDATED_LLM_TOKEN_HEADER_NAME: &str = "x-zed-outdated-token";

/// The name of the header used to indicate the usage limit for edit predictions.
pub const EDIT_PREDICTIONS_USAGE_LIMIT_HEADER_NAME: &str = "x-zed-edit-predictions-usage-limit";

/// The name of the header used to indicate the usage amount for edit predictions.
pub const EDIT_PREDICTIONS_USAGE_AMOUNT_HEADER_NAME: &str = "x-zed-edit-predictions-usage-amount";

/// Header *value* identifying the edit predictions resource.
///
/// NOTE(review): the header name this value is sent under is not defined in
/// this file — presumably a generic "resource" header declared elsewhere;
/// confirm against the server before relying on this.
pub const EDIT_PREDICTIONS_RESOURCE_HEADER_VALUE: &str = "edit_predictions";

/// The name of the header used to indicate the minimum required Zed version.
///
/// This can be used to force a Zed upgrade in order to continue communicating
/// with the LLM service.
pub const MINIMUM_REQUIRED_VERSION_HEADER_NAME: &str = "x-zed-minimum-required-version";

/// The name of the header used by the client to indicate to the server that it supports receiving status messages.
pub const CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str =
    "x-zed-client-supports-status-messages";

/// The name of the header used by the client to indicate to the server that it supports receiving a "stream_ended" request completion status.
pub const CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME: &str =
    "x-zed-client-supports-stream-ended-request-completion-status";

/// The name of the header used by the server to indicate to the client that it supports sending status messages.
pub const SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str =
    "x-zed-server-supports-status-messages";

/// The name of the header used by the client to indicate that it supports receiving xAI models.
pub const CLIENT_SUPPORTS_X_AI_HEADER_NAME: &str = "x-zed-client-supports-x-ai";

/// The maximum number of edit predictions that can be rejected per request.
pub const MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST: usize = 100;
59
/// A usage limit for a metered resource: either a finite count or unlimited.
///
/// Serialized in snake_case (`limited` / `unlimited`); also parseable from a
/// header string via its [`FromStr`] impl below.
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum UsageLimit {
    /// A finite limit of this many uses.
    Limited(i32),
    /// No limit.
    Unlimited,
}
66
67impl FromStr for UsageLimit {
68 type Err = anyhow::Error;
69
70 fn from_str(value: &str) -> Result<Self, Self::Err> {
71 match value {
72 "unlimited" => Ok(Self::Unlimited),
73 limit => limit
74 .parse::<i32>()
75 .map(Self::Limited)
76 .context("failed to parse limit"),
77 }
78 }
79}
80
/// An upstream language-model provider supported by the service.
///
/// Both serde and strum serialize variants in snake_case (e.g. `open_ai`,
/// `x_ai`), so string and JSON representations agree.
#[derive(
    Debug, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, EnumString, EnumIter, Display,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum LanguageModelProvider {
    Anthropic,
    OpenAi,
    Google,
    XAi,
}
92
/// Request body for an edit-prediction request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsBody {
    // Optional file/project outline for additional context.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub outline: Option<String>,
    // Serialized recent input events leading up to this request.
    pub input_events: String,
    // The excerpt of the buffer the prediction should be based on.
    pub input_excerpt: String,
    // Output the client speculates the model may produce — presumably used
    // server-side for speculative decoding; confirm against the server.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub speculated_output: Option<String>,
    /// Whether the user provided consent for sampling this interaction.
    // `alias` keeps deserialization compatible with the older field name.
    #[serde(default, alias = "data_collection_permission")]
    pub can_collect_data: bool,
    // Diagnostic groups as (language/source name, opaque JSON payload) pairs.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub diagnostic_groups: Option<Vec<(String, serde_json::Value)>>,
    /// Info about the git repository state, only present when can_collect_data is true.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub git_info: Option<PredictEditsGitInfo>,
    /// The trigger for this request.
    #[serde(default)]
    pub trigger: PredictEditsRequestTrigger,
}
113
/// What triggered an edit-prediction request.
///
/// NOTE(review): unlike most enums in this file there is no
/// `#[serde(rename_all = "snake_case")]`, so variants serialize in
/// UpperCamelCase (e.g. `"Testing"`). This is part of the wire format —
/// confirm it is intentional before changing.
#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PredictEditsRequestTrigger {
    Testing,
    Diagnostics,
    Cli,
    // Default when the field is absent from the request body.
    #[default]
    Other,
}
122
/// Git repository state attached to a prediction request
/// (only sent when data collection is permitted; see [`PredictEditsBody::git_info`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsGitInfo {
    /// SHA of git HEAD commit at time of prediction.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub head_sha: Option<String>,
    /// URL of the remote called `origin`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub remote_origin_url: Option<String>,
    /// URL of the remote called `upstream`.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub remote_upstream_url: Option<String>,
}
135
/// Response body for an edit-prediction request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictEditsResponse {
    // Server-assigned id; echoed back in accept/reject reports below.
    pub request_id: String,
    // The predicted replacement excerpt.
    pub output_excerpt: String,
}
141
/// Request body reporting that the user accepted an edit prediction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AcceptEditPredictionBody {
    // Id of the prediction request being accepted (see [`PredictEditsResponse`]).
    pub request_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_version: Option<String>,
    // End-to-end latency in milliseconds, when measured by the client.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub e2e_latency_ms: Option<u128>,
}
150
/// Request body reporting a batch of rejected edit predictions (owned,
/// deserialize-only server side; see [`RejectEditPredictionsBodyRef`] for the
/// client's borrowed serialize-only counterpart).
#[derive(Debug, Clone, Deserialize)]
pub struct RejectEditPredictionsBody {
    // Capped at [`MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST`] —
    // presumably enforced by the server; confirm where the cap is applied.
    pub rejections: Vec<EditPredictionRejection>,
}
155
/// Borrowed, serialize-only counterpart of [`RejectEditPredictionsBody`],
/// letting the client send rejections without cloning the slice.
#[derive(Debug, Clone, Serialize)]
pub struct RejectEditPredictionsBodyRef<'a> {
    pub rejections: &'a [EditPredictionRejection],
}
160
/// A single rejected edit prediction.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct EditPredictionRejection {
    // Id of the prediction request being rejected (see [`PredictEditsResponse`]).
    pub request_id: String,
    // Why the prediction was rejected; defaults to
    // [`EditPredictionRejectReason::Discarded`] when absent.
    #[serde(default)]
    pub reason: EditPredictionRejectReason,
    // Whether the prediction was ever displayed to the user.
    pub was_shown: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_version: Option<String>,
    // End-to-end latency in milliseconds, when measured by the client.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub e2e_latency_ms: Option<u128>,
}
172
/// Why an edit prediction was rejected.
///
/// NOTE(review): no `#[serde(rename_all = "snake_case")]` here, so variants
/// serialize in UpperCamelCase (e.g. `"Canceled"`) — part of the wire format;
/// confirm before changing.
#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum EditPredictionRejectReason {
    /// New requests were triggered before this one completed
    Canceled,
    /// No edits returned
    Empty,
    /// Edits returned, but none remained after interpolation
    InterpolatedEmpty,
    /// The new prediction was preferred over the current one
    Replaced,
    /// The current prediction was preferred over the new one
    CurrentPreferred,
    /// The current prediction was discarded
    #[default]
    Discarded,
    /// The current prediction was explicitly rejected by the user
    Rejected,
}
191
/// The intent behind a completion request, used to distinguish what feature
/// issued it (serialized in snake_case).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionIntent {
    UserPrompt,
    Subagent,
    ToolResults,
    ThreadSummarization,
    ThreadContextSummarization,
    CreateFile,
    EditFile,
    InlineAssist,
    TerminalInlineAssist,
    GenerateGitCommitMessage,
}
206
/// Request body for a completion request proxied to a language-model provider.
#[derive(Debug, Serialize, Deserialize)]
pub struct CompletionBody {
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub thread_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub prompt_id: Option<String>,
    // Which feature issued this request, when known.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub intent: Option<CompletionIntent>,
    pub provider: LanguageModelProvider,
    pub model: String,
    // Opaque provider-specific request payload, forwarded as-is.
    pub provider_request: serde_json::Value,
}
219
/// Status updates streamed to the client about an in-flight completion request
/// (serialized in snake_case).
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionRequestStatus {
    /// The request is waiting in a queue at the given position.
    Queued {
        position: usize,
    },
    /// The request has started streaming.
    Started,
    /// The request failed.
    Failed {
        code: String,
        message: String,
        request_id: Uuid,
        /// Retry duration in seconds.
        retry_after: Option<f64>,
    },
    /// The cloud sends a StreamEnded message when the stream from the LLM provider finishes.
    StreamEnded,
    // Forward-compatibility: any unrecognized status deserializes to this
    // variant instead of failing.
    #[serde(other)]
    Unknown,
}
239
/// An item in a completion stream: either a request-status update or a
/// provider-specific event payload of type `T`.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionEvent<T> {
    Status(CompletionRequestStatus),
    Event(T),
}
246
247impl<T> CompletionEvent<T> {
248 pub fn into_status(self) -> Option<CompletionRequestStatus> {
249 match self {
250 Self::Status(status) => Some(status),
251 Self::Event(_) => None,
252 }
253 }
254
255 pub fn into_event(self) -> Option<T> {
256 match self {
257 Self::Event(event) => Some(event),
258 Self::Status(_) => None,
259 }
260 }
261}
262
/// Request body for a web search.
#[derive(Serialize, Deserialize)]
pub struct WebSearchBody {
    pub query: String,
}
267
/// Response body for a web search.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSearchResponse {
    pub results: Vec<WebSearchResult>,
}
272
/// A single web search result.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSearchResult {
    pub title: String,
    pub url: String,
    pub text: String,
}
279
/// Request body for counting the tokens of a provider-specific request.
#[derive(Serialize, Deserialize)]
pub struct CountTokensBody {
    pub provider: LanguageModelProvider,
    pub model: String,
    // Opaque provider-specific request payload, forwarded as-is.
    pub provider_request: serde_json::Value,
}
286
/// Response body for a token-count request.
#[derive(Serialize, Deserialize)]
pub struct CountTokensResponse {
    pub tokens: usize,
}
291
/// Newtype identifier for a language model; `Arc<str>` makes clones cheap.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelId(pub Arc<str>);
294
295impl std::fmt::Display for LanguageModelId {
296 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
297 write!(f, "{}", self.0)
298 }
299}
300
/// Metadata describing a language model available through the service.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LanguageModel {
    pub provider: LanguageModelProvider,
    pub id: LanguageModelId,
    pub display_name: String,
    // Whether this is the latest version of the model; defaults to `false`
    // for payloads from servers that predate this field.
    #[serde(default)]
    pub is_latest: bool,
    pub max_token_count: usize,
    // Token limit when "max mode" is active, if the model supports it.
    pub max_token_count_in_max_mode: Option<usize>,
    pub max_output_tokens: usize,
    pub supports_tools: bool,
    pub supports_images: bool,
    pub supports_thinking: bool,
    #[serde(default)]
    pub supports_fast_mode: bool,
    pub supported_effort_levels: Vec<SupportedEffortLevel>,
    #[serde(default)]
    pub supports_streaming_tools: bool,
    /// Only used by OpenAI and xAI.
    #[serde(default)]
    pub supports_parallel_tool_calls: bool,
}
323
/// A reasoning-effort level supported by a model.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SupportedEffortLevel {
    // Human-readable label.
    pub name: Arc<str>,
    // Value sent to the provider.
    pub value: Arc<str>,
    // Whether this level is the model's default; omitted when unknown.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub is_default: Option<bool>,
}
331
/// Response body listing the models available to the client.
#[derive(Debug, Serialize, Deserialize)]
pub struct ListModelsResponse {
    pub models: Vec<LanguageModel>,
    pub default_model: Option<LanguageModelId>,
    pub default_fast_model: Option<LanguageModelId>,
    pub recommended_models: Vec<LanguageModelId>,
}
339
/// Current usage numbers for metered features.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct CurrentUsage {
    pub edit_predictions: UsageData,
}
344
/// Usage of a single metered feature: amount used against its limit.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageData {
    pub used: u32,
    pub limit: UsageLimit,
}
350
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_usage_limit_from_str() {
        // The literal "unlimited" maps to the unlimited variant.
        assert_eq!(
            UsageLimit::from_str("unlimited").unwrap(),
            UsageLimit::Unlimited
        );

        // Numeric strings map to finite limits.
        assert_eq!(UsageLimit::from_str("0").unwrap(), UsageLimit::Limited(0));
        assert_eq!(UsageLimit::from_str("50").unwrap(), UsageLimit::Limited(50));

        // Anything that is neither "unlimited" nor a plain integer is an error.
        for value in ["not_a_number", "50xyz"] {
            assert!(UsageLimit::from_str(value).is_err());
        }
    }
}