language_model.rs

mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Result, anyhow};
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    CompletionRequestStatus, MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME,
    MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    /// The maximum number of cache anchors the model supports.
    pub max_cache_anchors: usize,
    /// Whether to speculatively set cache anchors when preparing requests.
    pub should_speculate: bool,
    /// The minimum total token count below which caching is not attempted.
    pub min_total_token: usize,
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    StatusUpdate(CompletionRequestStatus),
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    ToolUse(LanguageModelToolUse),
    StartMessage {
        message_id: String,
    },
    UsageUpdate(TokenUsage),
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("received bad input JSON")]
    BadInputJson {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}
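
// Illustrative sketch, not part of the original file: `StopReason` uses
// `rename_all = "snake_case"`, so variants round-trip through serde as
// "end_turn", "max_tokens", and "tool_use".
#[cfg(test)]
mod stop_reason_example {
    use super::*;

    #[test]
    fn round_trips_as_snake_case() {
        let json = serde_json::to_string(&StopReason::ToolUse).unwrap();
        assert_eq!(json, "\"tool_use\"");

        let parsed: StopReason = serde_json::from_str("\"end_turn\"").unwrap();
        assert_eq!(parsed, StopReason::EndTurn);
    }
}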

#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header"))?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header"))?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}
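
// Illustrative sketch, not part of the original file: parsing usage out of a
// hand-built header map. This assumes the `MODEL_REQUESTS_*` constants are
// plain `&str` header names and that `UsageLimit` accepts a bare integer;
// real responses come from the Zed LLM service.
#[cfg(test)]
mod request_usage_example {
    use super::*;

    #[test]
    fn parses_limit_and_amount_headers() {
        let mut headers = HeaderMap::new();
        headers.insert(
            MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME,
            HeaderValue::from_static("100"),
        );
        headers.insert(
            MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME,
            HeaderValue::from_static("42"),
        );

        let usage = RequestUsage::from_headers(&headers).unwrap();
        assert_eq!(usage.amount, 42);
    }
}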

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}
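
// Illustrative sketch, not part of the original file: `TokenUsage` combines
// field-wise via `Add`/`Sub`, so running totals across `UsageUpdate` events
// can be kept with plain `+`. Note the subtraction is unchecked and will
// underflow if the right-hand side exceeds the left.
#[cfg(test)]
mod token_usage_example {
    use super::*;

    #[test]
    fn adds_and_subtracts_field_wise() {
        let first = TokenUsage {
            input_tokens: 10,
            output_tokens: 5,
            ..TokenUsage::default()
        };
        let second = TokenUsage {
            input_tokens: 3,
            output_tokens: 7,
            cache_read_input_tokens: 2,
            ..TokenUsage::default()
        };

        let total = first + second;
        assert_eq!(total.total_tokens(), 27);
        assert_eq!((total - first).output_tokens, 7);
    }
}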

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    // Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}
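
// Illustrative sketch, not part of the original file: draining a
// `LanguageModelTextStream` into one string. Real callers obtain the stream
// from `LanguageModel::stream_completion_text`; here it is stubbed with
// `futures::stream::iter` to show the consumption pattern only, and
// `block_on` assumes the `executor` feature of `futures` is enabled.
#[cfg(test)]
mod text_stream_example {
    use super::*;

    #[test]
    fn collects_text_chunks() {
        let text_stream = LanguageModelTextStream {
            stream: Box::pin(futures::stream::iter(vec![
                Ok("Hello, ".to_string()),
                Ok("world!".to_string()),
            ])),
            ..LanguageModelTextStream::default()
        };

        let text = futures::executor::block_on(async move {
            let mut stream = text_stream.stream;
            let mut completion = String::new();
            while let Some(chunk) = stream.next().await {
                completion.push_str(&chunk.unwrap());
            }
            completion
        });

        assert_eq!(text, "Hello, world!");
    }
}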

pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Returns whether this model supports "max mode".
    fn supports_max_mode(&self) -> bool {
        if self.provider_id().0 != ZED_CLOUD_PROVIDER_ID {
            return false;
        }

        const MAX_MODE_CAPABLE_MODELS: &[CloudModel] = &[
            CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
        ];

        for model in MAX_MODE_CAPABLE_MODELS {
            if self.id().0 == model.id() {
                return true;
            }
        }

        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;
    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    >;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id.clone());
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StatusUpdate { .. }) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}
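
// Illustrative sketch, not part of the original file: a tool only needs a
// deserializable, schema-deriving input type plus a name and description.
// `SearchToolInput` and its fields are hypothetical; the derived schema is
// what gets surfaced to the model in the format given by `tool_input_format`.
#[cfg(test)]
mod language_model_tool_example {
    use super::*;

    #[derive(Deserialize, JsonSchema)]
    #[allow(dead_code)]
    struct SearchToolInput {
        /// The query to search for.
        query: String,
        /// The maximum number of results to return.
        max_results: Option<usize>,
    }

    impl LanguageModelTool for SearchToolInput {
        fn name() -> String {
            "search".into()
        }

        fn description() -> String {
            "Searches the project for a query.".into()
        }
    }

    #[test]
    fn derives_an_input_schema() {
        let schema = schemars::schema_for!(SearchToolInput);
        let json = serde_json::to_value(&schema).unwrap();
        assert!(json["properties"]["query"].is_object());
    }
}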

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}
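
// Illustrative sketch, not part of the original file: the `From<String>`
// impls above make the newtype IDs cheap to build from owned strings, and
// the public `SharedString` field keeps the raw text accessible. This
// assumes `SharedString` implements `Display`.
#[cfg(test)]
mod id_example {
    use super::*;

    #[test]
    fn builds_ids_from_strings() {
        let id = LanguageModelId::from("claude-3-7-sonnet".to_string());
        assert_eq!(id.0.to_string(), "claude-3-7-sonnet");

        let provider = LanguageModelProviderId::from(ZED_CLOUD_PROVIDER_ID.to_string());
        assert_eq!(provider.to_string(), ZED_CLOUD_PROVIDER_ID);
    }
}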