language_model.rs

mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::Result;
use client::Client;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(client: Arc<Client>, cx: &mut App) {
    registry::init(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: usize,
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    Stop(StopReason),
    Text(String),
    Thinking(String),
    ToolUse(LanguageModelToolUse),
    StartMessage { message_id: String },
    UsageUpdate(TokenUsage),
}

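// A minimal sketch of the wire shape the derived serde impls above produce:
// externally tagged JSON, with the variant name as the key. The payload is
// illustrative.
#[cfg(test)]
mod completion_event_serde_examples {
    use super::LanguageModelCompletionEvent;

    #[test]
    fn events_serialize_externally_tagged() {
        let event = LanguageModelCompletionEvent::Text("Hello".to_string());
        assert_eq!(
            serde_json::to_string(&event).unwrap(),
            r#"{"Text":"Hello"}"#
        );
    }
}
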
/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see
    /// <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}

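// A short check of what `#[serde(rename_all = "snake_case")]` means for
// `StopReason` on the wire: unit variants serialize as snake_case strings.
#[cfg(test)]
mod stop_reason_serde_examples {
    use super::StopReason;

    #[test]
    fn stop_reasons_serialize_as_snake_case() {
        assert_eq!(
            serde_json::to_string(&StopReason::EndTurn).unwrap(),
            r#""end_turn""#
        );
        assert_eq!(
            serde_json::to_string(&StopReason::ToolUse).unwrap(),
            r#""tool_use""#
        );
    }
}
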
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    /// The combined input and output token count. Cache creation/read token
    /// counts are tracked separately and not included here.
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens + self.output_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

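// A minimal sketch of how the `Add`/`Sub` impls above support usage
// accounting: summing usage across requests and recovering a per-request
// delta. The token counts are illustrative.
#[cfg(test)]
mod token_usage_arithmetic_examples {
    use super::TokenUsage;

    #[test]
    fn accumulate_and_diff_usage() {
        let first = TokenUsage {
            input_tokens: 100,
            output_tokens: 20,
            ..TokenUsage::default()
        };
        let second = TokenUsage {
            input_tokens: 40,
            output_tokens: 10,
            ..TokenUsage::default()
        };

        // `Add` sums field-wise; `total_tokens` counts only input + output.
        let combined = first + second;
        assert_eq!(combined.total_tokens(), 170);

        // `Sub` is also field-wise. It underflows (panicking in debug builds)
        // if the right-hand side is larger, so only subtract an earlier
        // snapshot from a later one.
        assert_eq!(combined - first, second);
    }
}
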
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub input: serde_json::Value,
}

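// A short sketch of building a `LanguageModelToolUse` by hand, relying on the
// blanket `From<T: Into<Arc<str>>>` impl above for the id. The id, tool name,
// and input are hypothetical values.
#[cfg(test)]
mod tool_use_examples {
    use super::{LanguageModelToolUse, LanguageModelToolUseId};

    #[test]
    fn construct_tool_use() {
        let tool_use = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("tool-call-1"),
            name: "search".into(),
            input: serde_json::json!({ "query": "rust streams" }),
        };
        assert_eq!(tool_use.id.to_string(), "tool-call-1");
    }
}
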
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String>>,
    // Contains the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

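// A minimal sketch of draining a hand-built `LanguageModelTextStream`. Since
// `stream::iter` is always ready, the collect future completes synchronously
// and `now_or_never` suffices; real callers poll the stream from the app's
// async runtime instead. The chunk contents are illustrative.
#[cfg(test)]
mod text_stream_examples {
    use super::*;

    #[test]
    fn collect_text_chunks() {
        let text_stream = LanguageModelTextStream {
            message_id: Some("message-1".into()),
            stream: futures::stream::iter(vec![
                Ok("Hello, ".to_string()),
                Ok("world".to_string()),
            ])
            .boxed(),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        };

        let text = text_stream
            .stream
            .filter_map(|chunk| async move { chunk.ok() })
            .collect::<String>()
            .now_or_never()
            .expect("stream::iter yields immediately");
        assert_eq!(text, "Hello, world");
    }
}
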
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;

    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let events = self.stream_completion(request, cx);

        async move {
            let mut events = events.await?.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            // Peek at the first event so a provider-supplied message id can be
            // surfaced on the returned stream. A first `Text` event is held
            // back and re-emitted below; any other first event is dropped.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            // Re-emit any held-back text, then keep only `Text` events and
            // errors, recording usage updates as they arrive.
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking(_)) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

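// A small check of the `thiserror`-derived `Display` output above; the token
// count is illustrative.
#[cfg(test)]
mod known_error_examples {
    use super::LanguageModelKnownError;

    #[test]
    fn context_window_error_message() {
        let error = LanguageModelKnownError::ContextWindowLimitExceeded { tokens: 200_000 };
        assert_eq!(error.to_string(), "Context window limit exceeded (200000)");
    }
}
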
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

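// A sketch of a concrete `LanguageModelTool`: deriving `Deserialize` and
// `JsonSchema` satisfies the trait bounds, and `schemars` produces the input
// schema. `FileSearchTool` and its field are hypothetical.
#[cfg(test)]
mod language_model_tool_examples {
    use super::LanguageModelTool;
    use schemars::JsonSchema;
    use serde::Deserialize;

    #[derive(Deserialize, JsonSchema)]
    struct FileSearchTool {
        /// The glob pattern to match files against.
        #[allow(dead_code)]
        pattern: String,
    }

    impl LanguageModelTool for FileSearchTool {
        fn name() -> String {
            "file_search".into()
        }

        fn description() -> String {
            "Searches the worktree for files matching a glob pattern.".into()
        }
    }

    #[test]
    fn tool_input_schema_is_derivable() {
        let schema = schemars::schema_for!(FileSearchTool);
        let json = serde_json::to_value(&schema).unwrap();
        assert_eq!(json["properties"]["pattern"]["type"], "string");
    }
}
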
/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}
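
// A minimal sketch of the `From<String>` conversions above, reusing the
// `ZED_CLOUD_PROVIDER_ID` constant defined at the top of this file; the model
// id string is illustrative.
#[cfg(test)]
mod id_conversion_examples {
    use super::*;

    #[test]
    fn ids_from_strings() {
        let provider_id = LanguageModelProviderId::from(ZED_CLOUD_PROVIDER_ID.to_string());
        assert_eq!(provider_id.to_string(), "zed.dev");

        let model_id = LanguageModelId::from("example-model".to_string());
        assert_eq!(model_id, LanguageModelId(SharedString::from("example-model")));
    }
}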