mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Result, anyhow};
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

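/// The identifier of the Zed-hosted ("zed.dev") language model provider.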
pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

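/// Initializes this crate: sets up the language model registry and registers
/// the listener that refreshes LLM API tokens.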
pub fn init(client: Arc<Client>, cx: &mut App) {
    registry::init(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: usize,
}

/// A completion event from a language model.
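///
/// # Examples
///
/// A sketch of how a consumer might fold a stream of these events into
/// displayable text (`events` and `output` are assumed to exist in the
/// caller's context):
///
/// ```ignore
/// while let Some(event) = events.next().await {
///     match event? {
///         LanguageModelCompletionEvent::Text(chunk) => output.push_str(&chunk),
///         LanguageModelCompletionEvent::Stop(_) => break,
///         _ => {}
///     }
/// }
/// ```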
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    /// Generation stopped, with the reason why.
    Stop(StopReason),
    /// A chunk of response text.
    Text(String),
    /// A chunk of the model's reasoning ("thinking") output.
    Thinking {
        text: String,
        signature: Option<String>,
    },
    /// The model requested the use of a tool.
    ToolUse(LanguageModelToolUse),
    /// A new message started.
    StartMessage {
        message_id: String,
    },
    /// An update to the token usage accrued so far.
    UsageUpdate(TokenUsage),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}

#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
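    /// Parses request usage out of the usage headers on an LLM service response.
    ///
    /// # Examples
    ///
    /// A minimal sketch with made-up header values (this assumes
    /// `UsageLimit`'s `FromStr` impl accepts a plain number):
    ///
    /// ```ignore
    /// let mut headers = HeaderMap::new();
    /// headers.insert(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, HeaderValue::from_static("50"));
    /// headers.insert(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, HeaderValue::from_static("7"));
    ///
    /// let usage = RequestUsage::from_headers(&headers)?;
    /// assert_eq!(usage.amount, 7);
    /// ```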
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header"))?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header"))?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}

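/// Token counts for a request/response pair, including prompt-cache activity.
///
/// # Examples
///
/// Usage from consecutive turns can be accumulated with `+` (a minimal sketch
/// with made-up numbers; `language_model` is assumed to be this crate's name):
///
/// ```ignore
/// use language_model::TokenUsage;
///
/// let first = TokenUsage { input_tokens: 100, output_tokens: 20, ..Default::default() };
/// let second = TokenUsage { input_tokens: 40, output_tokens: 5, ..Default::default() };
/// assert_eq!((first + second).total_tokens(), 165);
/// ```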
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

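/// A stream of plain-text chunks produced by [`LanguageModel::stream_completion_text`].
///
/// # Examples
///
/// A sketch of draining the stream and reading the final usage (`model`,
/// `request`, and `cx` are assumed to exist in the caller's context):
///
/// ```ignore
/// let mut text_stream = model.stream_completion_text(request, &cx).await?;
/// while let Some(chunk) = text_stream.stream.next().await {
///     print!("{}", chunk?);
/// }
/// let usage = *text_stream.last_token_usage.lock();
/// ```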
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String>>,
    /// Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

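/// A language model exposed by a [`LanguageModelProvider`].
///
/// Implementors supply identity and capability metadata plus the two core
/// operations, token counting and streaming completions; the remaining
/// methods have default implementations built on those.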
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;

    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;

    fn stream_completion_with_usage(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<(
            BoxStream<'static, Result<LanguageModelCompletionEvent>>,
            Option<RequestUsage>,
        )>,
    > {
        self.stream_completion(request, cx)
            .map(|result| result.map(|stream| (stream, None)))
            .boxed()
    }

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        self.stream_completion_text_with_usage(request, cx)
            .map(|result| result.map(|(stream, _usage)| stream))
            .boxed()
    }

    fn stream_completion_text_with_usage(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<(LanguageModelTextStream, Option<RequestUsage>)>> {
        let future = self.stream_completion_with_usage(request, cx);

        async move {
            let (events, usage) = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

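            // Peek at the first event so the message ID can be surfaced, taking
            // care not to drop a first event that is already a text chunk.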
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

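            // Re-emit the buffered first text chunk, then forward only text
            // events and errors, recording usage updates into
            // `last_token_usage` as a side effect and dropping everything else.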
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok((
                LanguageModelTextStream {
                    message_id,
                    stream,
                    last_token_usage,
                },
                usage,
            ))
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

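/// A tool whose input type doubles as its schema: the input is deserialized
/// from the model's output, and the schema is derived via [`JsonSchema`].
///
/// # Examples
///
/// A minimal sketch of defining a tool input (hypothetical tool; the derives
/// assume `serde` and `schemars` as dependencies):
///
/// ```ignore
/// #[derive(serde::Deserialize, schemars::JsonSchema)]
/// struct FileSearchTool {
///     /// A substring to look for in file paths.
///     query: String,
/// }
///
/// impl LanguageModelTool for FileSearchTool {
///     fn name() -> String {
///         "file_search".into()
///     }
///
///     fn description() -> String {
///         "Searches the project for files whose path matches the query".into()
///     }
/// }
/// ```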
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

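/// A source of language models, such as a hosted service or a local runtime.
///
/// A provider reports the models it offers, exposes its authentication state
/// and flow, and renders its own configuration UI.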
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

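/// Gives access to a provider's observable state, so UI can re-render when
/// that state changes.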
pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}