mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Result, anyhow};
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(client: Arc<Client>, cx: &mut App) {
    registry::init(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    /// The maximum number of cache anchors that may be placed in a request.
    pub max_cache_anchors: usize,
    /// Whether the provider should speculatively populate the cache.
    pub should_speculate: bool,
    /// The minimum total token count below which caching is not applied.
    pub min_total_token: usize,
}
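
// Constructing a cache configuration (a sketch; the values are illustrative,
// not provider defaults):
//
//     let cache_config = LanguageModelCacheConfiguration {
//         max_cache_anchors: 4,
//         should_speculate: true,
//         min_total_token: 10_000,
//     };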
62
63/// A completion event from a language model.
64#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
65pub enum LanguageModelCompletionEvent {
66 Stop(StopReason),
67 Text(String),
68 Thinking(String),
69 ToolUse(LanguageModelToolUse),
70 StartMessage { message_id: String },
71 UsageUpdate(TokenUsage),
72}
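
// Handling events from a completion stream (a sketch; `output`,
// `latest_usage`, and `handle_stop` are hypothetical locals and helpers
// owned by the caller):
//
//     match event? {
//         LanguageModelCompletionEvent::Text(chunk) => output.push_str(&chunk),
//         LanguageModelCompletionEvent::UsageUpdate(usage) => latest_usage = usage,
//         LanguageModelCompletionEvent::Stop(reason) => handle_stop(reason),
//         _ => {}
//     }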
73
74/// Indicates the format used to define the input schema for a language model tool.
75#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
76pub enum LanguageModelToolSchemaFormat {
77 /// A JSON schema, see https://json-schema.org
78 JsonSchema,
79 /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see https://ai.google.dev/api/caching#Schema
80 JsonSchemaSubset,
81}
82
83#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
84#[serde(rename_all = "snake_case")]
85pub enum StopReason {
86 EndTurn,
87 MaxTokens,
88 ToolUse,
89}
90
91#[derive(Debug, Clone, Copy)]
92pub struct RequestUsage {
93 pub limit: UsageLimit,
94 pub amount: i32,
95}
96
97impl RequestUsage {
98 pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
99 let limit = headers
100 .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
101 .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header"))?;
102 let limit = UsageLimit::from_str(limit.to_str()?)?;
103
104 let amount = headers
105 .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
106 .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header"))?;
107 let amount = amount.to_str()?.parse::<i32>()?;
108
109 Ok(Self { limit, amount })
110 }
111}
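
// Parsing usage out of an HTTP response (a sketch; assumes the headers carry
// values in whatever wire format `UsageLimit::from_str` accepts -- see
// `zed_llm_client` for the exact encoding):
//
//     let usage = RequestUsage::from_headers(response.headers())?;
//     log::debug!("used {} of {:?} requests", usage.amount, usage.limit);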

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}
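
#[cfg(test)]
mod token_usage_arithmetic_example {
    use super::*;

    // A sketch of the `Add`/`Sub` semantics above: both operate
    // componentwise. Note that `Sub` uses plain `u32` subtraction, so it
    // panics in debug builds (and wraps in release) whenever a component of
    // `other` exceeds the corresponding component of `self`.
    #[test]
    fn add_and_sub_are_componentwise() {
        let a = TokenUsage {
            input_tokens: 10,
            output_tokens: 4,
            ..Default::default()
        };
        let b = TokenUsage {
            input_tokens: 3,
            output_tokens: 1,
            ..Default::default()
        };
        assert_eq!((a + b).input_tokens, 13);
        assert_eq!((a - b).output_tokens, 3);
        assert_eq!((a + b).total_tokens(), 18);
    }
}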
161
162#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
163pub struct LanguageModelToolUseId(Arc<str>);
164
165impl fmt::Display for LanguageModelToolUseId {
166 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
167 write!(f, "{}", self.0)
168 }
169}
170
171impl<T> From<T> for LanguageModelToolUseId
172where
173 T: Into<Arc<str>>,
174{
175 fn from(value: T) -> Self {
176 Self(value.into())
177 }
178}
179
180#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
181pub struct LanguageModelToolUse {
182 pub id: LanguageModelToolUseId,
183 pub name: Arc<str>,
184 pub input: serde_json::Value,
185}
186
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String>>,
    /// The cumulative token usage; it is only complete once the stream has
    /// finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}
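
// Draining a `LanguageModelTextStream` (a sketch; requires an async executor
// to poll the stream):
//
//     let mut response = String::new();
//     while let Some(chunk) = text_stream.stream.next().await {
//         response.push_str(&chunk?);
//     }
//     // Only complete now that the stream has finished:
//     let usage = *text_stream.last_token_usage.lock();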

/// An interface to a language model, exposed by a [`LanguageModelProvider`].
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    /// The maximum number of tokens this model accepts in a single request.
    fn max_token_count(&self) -> usize;

    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;

    fn stream_completion_with_usage(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<(
            BoxStream<'static, Result<LanguageModelCompletionEvent>>,
            Option<RequestUsage>,
        )>,
    > {
        // By default, providers report no request usage alongside the stream.
        self.stream_completion(request, cx)
            .map(|result| result.map(|stream| (stream, None)))
            .boxed()
    }

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        self.stream_completion_text_with_usage(request, cx)
            .map(|result| result.map(|(stream, _usage)| stream))
            .boxed()
    }

    fn stream_completion_text_with_usage(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<(LanguageModelTextStream, Option<RequestUsage>)>> {
        let future = self.stream_completion_with_usage(request, cx);

        async move {
            let (events, usage) = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            // Peek at the first event to capture the message ID, if the
            // provider reports one. Note that any other kind of first event
            // (including an error) is discarded here.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            // Re-attach the first text chunk (if any), then keep only text
            // chunks and errors, recording token usage as it arrives.
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking(_)) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok((
                LanguageModelTextStream {
                    message_id,
                    stream,
                    last_token_usage,
                },
                usage,
            ))
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}
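
// Checking a request against the model's context window before sending it
// (a sketch; requires an async context to await the future):
//
//     let token_count = model.count_tokens(request.clone(), cx).await?;
//     if token_count > model.max_token_count() {
//         return Err(LanguageModelKnownError::ContextWindowLimitExceeded {
//             tokens: token_count,
//         }
//         .into());
//     }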
340
341#[derive(Debug, Error)]
342pub enum LanguageModelKnownError {
343 #[error("Context window limit exceeded ({tokens})")]
344 ContextWindowLimitExceeded { tokens: usize },
345}
346
/// A typed tool input that a language model can invoke. The input schema is
/// derived from the implementing type via [`JsonSchema`], and the model's
/// arguments are deserialized into it.
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}
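
#[cfg(test)]
mod language_model_tool_example {
    use super::*;

    // A sketch of implementing `LanguageModelTool`: derive `Deserialize` and
    // `JsonSchema` on the input type, then give the tool a name and a
    // description. `SearchTool` and its field are hypothetical.
    #[derive(Deserialize, JsonSchema)]
    struct SearchTool {
        /// The query to search the project for.
        query: String,
    }

    impl LanguageModelTool for SearchTool {
        fn name() -> String {
            "search".into()
        }

        fn description() -> String {
            "Searches the project for the given query.".into()
        }
    }

    #[test]
    fn deserializes_model_provided_input() {
        let input: SearchTool =
            serde_json::from_value(serde_json::json!({ "query": "fn main" })).unwrap();
        assert_eq!(input.query, "fn main");
    }
}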

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}
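
// Subscribing to provider state changes (a sketch; `provider` implements
// `LanguageModelProviderState`, and `refresh_models` is a hypothetical
// method on the observing entity):
//
//     if let Some(subscription) = provider.subscribe(cx, |this, cx| {
//         this.refresh_models(cx);
//         cx.notify();
//     }) {
//         subscription.detach();
//     }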

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}