language_model.rs

mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Result, anyhow};
use client::Client;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

/// Initializes the global language model registry and registers the LLM
/// token refresh listener on the given client.
pub fn init(client: Arc<Client>, cx: &mut App) {
    registry::init(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
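///
/// A hypothetical configuration for a prompt-caching model (the values are
/// illustrative, not defaults):
///
/// ```ignore
/// let config = LanguageModelCacheConfiguration {
///     // Maximum number of cache anchors to place in a request.
///     max_cache_anchors: 4,
///     // Whether to speculatively write cache entries.
///     should_speculate: true,
///     // Minimum total token count before caching is applied.
///     min_total_token: 1024,
/// };
/// ```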
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: usize,
}

/// A completion event from a language model.
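///
/// Consumers typically match on these events inside a receive loop over
/// [`LanguageModel::stream_completion`]. A hedged sketch (`buffer` and
/// `latest_usage` are assumed locals):
///
/// ```ignore
/// match event {
///     LanguageModelCompletionEvent::Text(text) => buffer.push_str(&text),
///     LanguageModelCompletionEvent::UsageUpdate(usage) => latest_usage = usage,
///     LanguageModelCompletionEvent::Stop(_reason) => break,
///     _ => {}
/// }
/// ```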
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    ToolUse(LanguageModelToolUse),
    StartMessage {
        message_id: String,
    },
    UsageUpdate(TokenUsage),
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("received bad input JSON")]
    BadInputJson {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see
    /// <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

/// The reason a language model stopped generating a response.
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}

/// Model-request usage reported via response headers, measured against the
/// user's request limit.
#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
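    /// Parses request usage from the usage headers of a Zed LLM response.
    ///
    /// A hedged sketch of calling it (the header values shown are invented;
    /// the header-name constants come from `zed_llm_client`):
    ///
    /// ```ignore
    /// let mut headers = HeaderMap::new();
    /// headers.insert(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, HeaderValue::from_static("100"));
    /// headers.insert(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, HeaderValue::from_static("42"));
    /// let usage = RequestUsage::from_headers(&headers)?;
    /// assert_eq!(usage.amount, 42);
    /// ```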
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header"))?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header"))?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}

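/// Token counts reported by a model for a single request.
///
/// `Add` and `Sub` are implemented field-wise (see the impls below), so usage
/// can be accumulated across requests or diffed between snapshots. An
/// illustrative sketch with made-up values:
///
/// ```ignore
/// let first = TokenUsage { input_tokens: 100, output_tokens: 40, ..Default::default() };
/// let second = TokenUsage { input_tokens: 80, output_tokens: 25, ..Default::default() };
/// assert_eq!((first + second).total_tokens(), 245);
/// ```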
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

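/// An opaque identifier for a single tool use emitted by a model. Any
/// string-like value converts into one (the ID below is illustrative):
///
/// ```ignore
/// let id: LanguageModelToolUseId = "toolu_01".into();
/// assert_eq!(id.to_string(), "toolu_01");
/// ```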
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

/// A tool invocation requested by the model, including both the raw input
/// string and its parsed JSON value.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

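/// A plain-text view of a completion stream: tool uses, thinking, and other
/// non-text events are filtered out.
///
/// A hedged sketch of draining one from an async context (assumes
/// `futures::StreamExt` is in scope and `cx` is an `AsyncApp`):
///
/// ```ignore
/// let mut text_stream = model.stream_completion_text(request, &cx).await?;
/// while let Some(chunk) = text_stream.stream.next().await {
///     print!("{}", chunk?);
/// }
/// // The complete token usage is available once the stream has finished.
/// let usage = *text_stream.last_token_usage.lock();
/// ```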
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    /// Contains the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

/// An interface to a language model, independent of the provider that serves
/// it.
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Returns whether this model supports "max mode".
    fn supports_max_mode(&self) -> bool {
        if self.provider_id().0 != ZED_CLOUD_PROVIDER_ID {
            return false;
        }

        const MAX_MODE_CAPABLE_MODELS: &[CloudModel] = &[
            CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
        ];

        MAX_MODE_CAPABLE_MODELS
            .iter()
            .any(|model| self.id().0 == model.id())
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;

    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    /// Estimates the number of tokens `request` will consume for this model.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    /// Streams completion events for `request`.
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    >;

    /// Like [`Self::stream_completion`], but also reports request usage when
    /// the provider makes it available.
    fn stream_completion_with_usage(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<(
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            Option<RequestUsage>,
        )>,
    > {
        // By default, providers report no usage alongside the stream.
        self.stream_completion(request, cx)
            .map(|result| result.map(|stream| (stream, None)))
            .boxed()
    }

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        self.stream_completion_text_with_usage(request, cx)
            .map(|result| result.map(|(stream, _usage)| stream))
            .boxed()
    }

    fn stream_completion_text_with_usage(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<(LanguageModelTextStream, Option<RequestUsage>)>> {
        let future = self.stream_completion_with_usage(request, cx);

        async move {
            let (events, usage) = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            // Peek at the first event to capture the message ID or the
            // leading chunk of text; any other first event is discarded.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            // Re-prepend any captured leading text, then keep only text
            // chunks and errors, recording usage updates as they arrive.
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok((
                LanguageModelTextStream {
                    message_id,
                    stream,
                    last_token_usage,
                },
                usage,
            ))
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

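/// A tool that a language model can invoke, with its input schema derived
/// from the implementing type via [`JsonSchema`].
///
/// A hypothetical implementation sketch (`WeatherQuery` is invented for
/// illustration):
///
/// ```ignore
/// #[derive(serde::Deserialize, schemars::JsonSchema)]
/// struct WeatherQuery {
///     /// The city to look up.
///     city: String,
/// }
///
/// impl LanguageModelTool for WeatherQuery {
///     fn name() -> String {
///         "get_weather".into()
///     }
///
///     fn description() -> String {
///         "Look up the current weather for a city.".into()
///     }
/// }
/// ```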
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// A provider of one or more language models, along with the UI needed to
/// configure and authenticate it.
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

/// Where the terms-of-service acceptance prompt is being rendered.
#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

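/// Provider state that the UI can observe for changes, such as
/// authentication status or the set of available models.
///
/// An illustrative sketch of wiring up an observer (`MyView` is a
/// hypothetical type):
///
/// ```ignore
/// let _subscription = provider.subscribe(cx, |_view: &mut MyView, cx| {
///     cx.notify();
/// });
/// ```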
pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}