mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Context as _, Result};
use client::Client;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    CompletionRequestStatus, MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME,
    MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client, cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// Configuration for caching language model messages.
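///
/// A construction sketch (the values here are illustrative assumptions, not
/// recommended defaults):
///
/// ```ignore
/// let cache_config = LanguageModelCacheConfiguration {
///     max_cache_anchors: 4,
///     should_speculate: true,
///     min_total_token: 1024,
/// };
/// ```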
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    /// The maximum number of cache anchors to set for a request.
    pub max_cache_anchors: usize,
    /// Whether to speculatively write to the cache ahead of the next request.
    pub should_speculate: bool,
    /// The minimum total token count below which caching is skipped.
    pub min_total_token: u64,
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    /// A status update for the underlying completion request.
    StatusUpdate(CompletionRequestStatus),
    /// The model stopped generating for the given reason.
    Stop(StopReason),
    /// A chunk of generated text.
    Text(String),
    /// A chunk of the model's reasoning, with an optional provider signature.
    Thinking {
        text: String,
        signature: Option<String>,
    },
    /// An opaque chunk of reasoning that the provider has redacted.
    RedactedThinking {
        data: String,
    },
    /// The model requested that a tool be invoked.
    ToolUse(LanguageModelToolUse),
    /// A new message started, carrying its provider-assigned ID.
    StartMessage {
        message_id: String,
    },
    /// An updated token usage count for the request.
    UsageUpdate(TokenUsage),
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("rate limit exceeded, retry after {0:?}")]
    RateLimit(Duration),
    #[error("received bad input JSON")]
    BadInputJson {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
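    /// Parses request usage from the usage headers returned by the Zed LLM
    /// service.
    ///
    /// A consumption sketch (the header values are assumptions about the wire
    /// format, not a documented contract):
    ///
    /// ```ignore
    /// let mut headers = HeaderMap::new();
    /// headers.insert(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, HeaderValue::from_static("100"));
    /// headers.insert(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, HeaderValue::from_static("7"));
    /// let usage = RequestUsage::from_headers(&headers)?;
    /// assert_eq!(usage.amount, 7);
    /// ```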
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .with_context(|| {
                format!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header")
            })?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .with_context(|| {
                format!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header")
            })?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}

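/// Token usage reported for a request, broken down by input, output, and
/// cache activity.
///
/// A usage sketch exercising the arithmetic impls below (values are
/// illustrative):
///
/// ```ignore
/// let a = TokenUsage { input_tokens: 100, output_tokens: 20, ..Default::default() };
/// let b = TokenUsage { input_tokens: 40, output_tokens: 10, ..Default::default() };
/// assert_eq!((a + b).total_tokens(), 170);
/// ```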
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u64,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u64 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

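/// A completion stream reduced to plain text chunks, as produced by
/// [`LanguageModel::stream_completion_text`].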
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    /// Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Returns whether this model supports "burn mode".
    fn supports_max_mode(&self) -> bool {
        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> u64;

    fn max_output_tokens(&self) -> Option<u64> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    >;

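    /// Streams a completion and reduces it to plain text, recording the last
    /// reported token usage on the returned [`LanguageModelTextStream`].
    ///
    /// A consumption sketch (the `model` handle and `request` are assumed to
    /// exist in the caller's context):
    ///
    /// ```ignore
    /// let text_stream = model.stream_completion_text(request, &cx).await?;
    /// let mut chunks = text_stream.stream;
    /// while let Some(chunk) = chunks.next().await {
    ///     print!("{}", chunk?);
    /// }
    /// let usage = *text_stream.last_token_usage.lock();
    /// ```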
    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            // Peek at the first event to capture the message ID, if any.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            // Re-prepend the first text chunk (if any), then keep only text
            // chunks and errors, recording token usage as a side effect.
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StatusUpdate(_)) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("context window limit exceeded ({tokens} tokens)")]
    ContextWindowLimitExceeded { tokens: u64 },
}

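/// A strongly typed tool definition whose input schema is derived from the
/// implementing type.
///
/// A hypothetical implementation sketch (`FindReferences` is an illustration,
/// not a tool defined in this crate):
///
/// ```ignore
/// #[derive(serde::Deserialize, schemars::JsonSchema)]
/// struct FindReferences {
///     /// The symbol to look up.
///     symbol: String,
/// }
///
/// impl LanguageModelTool for FindReferences {
///     fn name() -> String {
///         "find_references".into()
///     }
///
///     fn description() -> String {
///         "Finds references to a symbol in the project.".into()
///     }
/// }
/// ```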
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

/// Identifies the surface from which the provider's terms-of-service prompt is rendered.
#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// The Agent Panel's empty thread view, shown when past interactions exist.
    ThreadEmptyState,
    /// The Agent Panel's fresh-start view, shown when there are no past interactions.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

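    /// Subscribes `callback` to change notifications from the provider's
    /// observable entity, if it has one.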
    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}