language_model.rs

mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Context as _, Result};
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    CompletionRequestStatus, MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME,
    MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

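/// Initializes the `language_model` crate: registers its settings and the
/// listener that refreshes LLM tokens for the given client.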
pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// Configuration for caching language model messages.
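///
/// # Examples
///
/// A sketch of a configuration; the values here are illustrative, not
/// provider defaults:
///
/// ```ignore
/// let cache_config = LanguageModelCacheConfiguration {
///     max_cache_anchors: 4,
///     should_speculate: true,
///     min_total_token: 1024,
/// };
/// ```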
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: u64,
}

/// A completion event from a language model.
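///
/// Consumers generally match on each event as it arrives from the stream.
/// A minimal sketch (the `handle_*` functions are hypothetical):
///
/// ```ignore
/// match event {
///     LanguageModelCompletionEvent::Text(text) => handle_text(text),
///     LanguageModelCompletionEvent::UsageUpdate(usage) => handle_usage(usage),
///     LanguageModelCompletionEvent::Stop(reason) => handle_stop(reason),
///     _ => {}
/// }
/// ```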
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    StatusUpdate(CompletionRequestStatus),
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    ToolUse(LanguageModelToolUse),
    StartMessage {
        message_id: String,
    },
    UsageUpdate(TokenUsage),
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("rate limit exceeded, retry after {0:?}")]
    RateLimit(Duration),
    #[error("received bad input JSON")]
    BadInputJson {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
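    /// Parses request usage from the response headers of a Zed LLM request.
    ///
    /// A sketch of the expected wire format; the header values below are
    /// illustrative:
    ///
    /// ```ignore
    /// let mut headers = HeaderMap::new();
    /// headers.insert(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, "100".parse()?);
    /// headers.insert(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, "42".parse()?);
    /// let usage = RequestUsage::from_headers(&headers)?;
    /// assert_eq!(usage.amount, 42);
    /// ```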
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .with_context(|| {
                format!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header")
            })?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .with_context(|| {
                format!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header")
            })?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u64,
}

impl TokenUsage {
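    /// Returns the sum of all token categories.
    ///
    /// `TokenUsage` also implements `Add` and `Sub`, which makes it easy to
    /// aggregate usage across requests or diff two cumulative snapshots. A
    /// sketch with illustrative numbers:
    ///
    /// ```ignore
    /// let before = TokenUsage { input_tokens: 10, ..Default::default() };
    /// let after = TokenUsage { input_tokens: 25, output_tokens: 5, ..Default::default() };
    /// let delta = after - before;
    /// assert_eq!(delta.total_tokens(), 20);
    /// ```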
    pub fn total_tokens(&self) -> u64 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

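/// An opaque identifier for a single tool use, as reported by the model
/// provider.
///
/// Any string-like value converts into an id (the value below is
/// illustrative):
///
/// ```ignore
/// let id = LanguageModelToolUseId::from("tool_use_0");
/// ```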
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

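/// A text-only view of a completion stream, with tool uses and other
/// non-text events filtered out.
///
/// A sketch of draining the stream from an async context (`model` and
/// `request` are assumed to be in scope):
///
/// ```ignore
/// let mut text_stream = model.stream_completion_text(request, &cx).await?;
/// while let Some(chunk) = text_stream.stream.next().await {
///     print!("{}", chunk?);
/// }
/// let usage = *text_stream.last_token_usage.lock();
/// ```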
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    /// Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

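/// An interface to a language model: metadata and capability queries, token
/// counting, and streaming completions.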
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Whether this model supports "burn mode".
    fn supports_max_mode(&self) -> bool {
        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> u64;
    fn max_output_tokens(&self) -> Option<u64> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    >;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            // Inspect the first event to capture the message ID or the
            // initial text chunk, if present.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id.clone());
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            // Re-emit any initial text, then forward subsequent text chunks,
            // recording usage updates as they arrive and dropping all other
            // events.
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StatusUpdate { .. }) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: u64 },
}

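/// A tool that can be invoked by a language model, with its input schema
/// derived from the implementing type via `JsonSchema`.
///
/// A minimal sketch of a tool definition; the struct, name, and description
/// are illustrative:
///
/// ```ignore
/// #[derive(Deserialize, JsonSchema)]
/// struct SearchTool {
///     /// The query to search for.
///     query: String,
/// }
///
/// impl LanguageModelTool for SearchTool {
///     fn name() -> String {
///         "search".into()
///     }
///
///     fn description() -> String {
///         "Searches the project for a query.".into()
///     }
/// }
/// ```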
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

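/// A source of language models, such as a cloud service or a local runtime,
/// that can authenticate, report its available models, and render its own
/// configuration UI.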
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

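    /// Observes this provider's state entity, if it has one, invoking
    /// `callback` whenever that entity changes.
    ///
    /// A sketch of wiring a view up to provider changes (`MyView` is a
    /// hypothetical entity type):
    ///
    /// ```ignore
    /// provider.subscribe(cx, |_this: &mut MyView, cx| {
    ///     cx.notify();
    /// });
    /// ```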
    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}