language_model.rs

mod api_key;
mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;
pub mod tool_schema;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anthropic::{AnthropicError, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::Client;
use cloud_llm_client::{CompletionMode, CompletionRequestStatus, UsageLimit};
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::{StatusCode, http};
use icons::IconName;
use open_router::OpenRouterError;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
pub use settings::LanguageModelCacheConfiguration;
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{fmt, io};
use thiserror::Error;
use util::serde::is_default;

pub use crate::api_key::{ApiKey, ApiKeyState};
pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;
pub use crate::tool_schema::LanguageModelToolSchemaFormat;
pub use zed_env_vars::{EnvVar, env_var};

pub const ANTHROPIC_PROVIDER_ID: LanguageModelProviderId =
    LanguageModelProviderId::new("anthropic");
pub const ANTHROPIC_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Anthropic");

pub const GOOGLE_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("google");
pub const GOOGLE_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Google AI");

pub const OPEN_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("openai");
pub const OPEN_AI_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("OpenAI");

pub const X_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("x_ai");
pub const X_AI_PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("xAI");

pub const ZED_CLOUD_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("zed.dev");
pub const ZED_CLOUD_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Zed");

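/// Initializes the language model crate: registers settings and the
/// cloud LLM token refresh listener.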
pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client, cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    Queued {
        position: usize,
    },
    Started,
    UsageUpdated {
        amount: usize,
        limit: UsageLimit,
    },
    ToolUseLimitReached,
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    RedactedThinking {
        data: String,
    },
    ToolUse(LanguageModelToolUse),
    ToolUseJsonParseError {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    StartMessage {
        message_id: String,
    },
    ReasoningDetails(serde_json::Value),
    UsageUpdate(TokenUsage),
}

impl LanguageModelCompletionEvent {
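    /// Converts a cloud `CompletionRequestStatus` into a completion event,
    /// mapping `Failed` statuses to a `LanguageModelCompletionError`.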
    pub fn from_completion_request_status(
        status: CompletionRequestStatus,
        upstream_provider: LanguageModelProviderName,
    ) -> Result<Self, LanguageModelCompletionError> {
        match status {
            CompletionRequestStatus::Queued { position } => {
                Ok(LanguageModelCompletionEvent::Queued { position })
            }
            CompletionRequestStatus::Started => Ok(LanguageModelCompletionEvent::Started),
            CompletionRequestStatus::UsageUpdated { amount, limit } => {
                Ok(LanguageModelCompletionEvent::UsageUpdated { amount, limit })
            }
            CompletionRequestStatus::ToolUseLimitReached => {
                Ok(LanguageModelCompletionEvent::ToolUseLimitReached)
            }
            CompletionRequestStatus::Failed {
                code,
                message,
                request_id: _,
                retry_after,
            } => Err(LanguageModelCompletionError::from_cloud_failure(
                upstream_provider,
                code,
                message,
                retry_after.map(Duration::from_secs_f64),
            )),
        }
    }
}

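/// An error produced while requesting or streaming a completion from a
/// language model provider. Transport failures and provider HTTP statuses
/// are mapped onto these variants so callers can handle them uniformly.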
#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("prompt too large for context window")]
    PromptTooLarge { tokens: Option<u64> },
    #[error("missing {provider} API key")]
    NoApiKey { provider: LanguageModelProviderName },
    #[error("{provider}'s API rate limit exceeded")]
    RateLimitExceeded {
        provider: LanguageModelProviderName,
        retry_after: Option<Duration>,
    },
    #[error("{provider}'s API servers are overloaded right now")]
    ServerOverloaded {
        provider: LanguageModelProviderName,
        retry_after: Option<Duration>,
    },
    #[error("{provider}'s API server reported an internal server error: {message}")]
    ApiInternalServerError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("{message}")]
    UpstreamProviderError {
        message: String,
        status: StatusCode,
        retry_after: Option<Duration>,
    },
    #[error("HTTP response error from {provider}'s API: status {status_code} - {message:?}")]
    HttpResponseError {
        provider: LanguageModelProviderName,
        status_code: StatusCode,
        message: String,
    },

    // Client errors
    #[error("invalid request format to {provider}'s API: {message}")]
    BadRequestFormat {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("authentication error with {provider}'s API: {message}")]
    AuthenticationError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("permission error with {provider}'s API: {message}")]
    PermissionError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("language model provider API endpoint not found")]
    ApiEndpointNotFound { provider: LanguageModelProviderName },
    #[error("I/O error reading response from {provider}'s API")]
    ApiReadResponseError {
        provider: LanguageModelProviderName,
        #[source]
        error: io::Error,
    },
    #[error("error serializing request to {provider} API")]
    SerializeRequest {
        provider: LanguageModelProviderName,
        #[source]
        error: serde_json::Error,
    },
    #[error("error building request body to {provider} API")]
    BuildRequestBody {
        provider: LanguageModelProviderName,
        #[source]
        error: http::Error,
    },
    #[error("error sending HTTP request to {provider} API")]
    HttpSend {
        provider: LanguageModelProviderName,
        #[source]
        error: anyhow::Error,
    },
    #[error("error deserializing {provider} API response")]
    DeserializeResponse {
        provider: LanguageModelProviderName,
        #[source]
        error: serde_json::Error,
    },

    // TODO: Ideally this would be removed in favor of having a comprehensive list of errors.
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

impl LanguageModelCompletionError {
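    /// Parses an `upstream_http_error` JSON payload of the form
    /// `{"upstream_status": <u16>, "message": "..."}`, falling back to the
    /// raw message when no inner message is present.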
    fn parse_upstream_error_json(message: &str) -> Option<(StatusCode, String)> {
        let error_json = serde_json::from_str::<serde_json::Value>(message).ok()?;
        let upstream_status = error_json
            .get("upstream_status")
            .and_then(|v| v.as_u64())
            .and_then(|status| u16::try_from(status).ok())
            .and_then(|status| StatusCode::from_u16(status).ok())?;
        let inner_message = error_json
            .get("message")
            .and_then(|v| v.as_str())
            .unwrap_or(message)
            .to_string();
        Some((upstream_status, inner_message))
    }

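    /// Builds an error from a cloud completion failure. Recognized `code`
    /// values are `upstream_http_error` (status embedded in the JSON
    /// message), `upstream_http_<status>` (attributed to the upstream
    /// provider), and `http_<status>` (attributed to Zed's cloud service);
    /// anything else becomes an opaque `Other` error.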
    pub fn from_cloud_failure(
        upstream_provider: LanguageModelProviderName,
        code: String,
        message: String,
        retry_after: Option<Duration>,
    ) -> Self {
        if let Some(tokens) = parse_prompt_too_long(&message) {
            // TODO: currently Anthropic PAYLOAD_TOO_LARGE response may cause INTERNAL_SERVER_ERROR
            // to be reported. This is a temporary workaround to handle this in the case where the
            // token limit has been exceeded.
            Self::PromptTooLarge {
                tokens: Some(tokens),
            }
        } else if code == "upstream_http_error" {
            if let Some((upstream_status, inner_message)) =
                Self::parse_upstream_error_json(&message)
            {
                return Self::from_http_status(
                    upstream_provider,
                    upstream_status,
                    inner_message,
                    retry_after,
                );
            }
            anyhow!("completion request failed, code: {code}, message: {message}").into()
        } else if let Some(status_code) = code
            .strip_prefix("upstream_http_")
            .and_then(|code| StatusCode::from_str(code).ok())
        {
            Self::from_http_status(upstream_provider, status_code, message, retry_after)
        } else if let Some(status_code) = code
            .strip_prefix("http_")
            .and_then(|code| StatusCode::from_str(code).ok())
        {
            Self::from_http_status(ZED_CLOUD_PROVIDER_NAME, status_code, message, retry_after)
        } else {
            anyhow!("completion request failed, code: {code}, message: {message}").into()
        }
    }

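    /// Maps an HTTP status code from a provider's API to a structured error.
    /// Status 529, which some providers (notably Anthropic) use to signal
    /// overload, is treated the same as `SERVICE_UNAVAILABLE`.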
    pub fn from_http_status(
        provider: LanguageModelProviderName,
        status_code: StatusCode,
        message: String,
        retry_after: Option<Duration>,
    ) -> Self {
        match status_code {
            StatusCode::BAD_REQUEST => Self::BadRequestFormat { provider, message },
            StatusCode::UNAUTHORIZED => Self::AuthenticationError { provider, message },
            StatusCode::FORBIDDEN => Self::PermissionError { provider, message },
            StatusCode::NOT_FOUND => Self::ApiEndpointNotFound { provider },
            StatusCode::PAYLOAD_TOO_LARGE => Self::PromptTooLarge {
                tokens: parse_prompt_too_long(&message),
            },
            StatusCode::TOO_MANY_REQUESTS => Self::RateLimitExceeded {
                provider,
                retry_after,
            },
            StatusCode::INTERNAL_SERVER_ERROR => Self::ApiInternalServerError { provider, message },
            StatusCode::SERVICE_UNAVAILABLE => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            _ if status_code.as_u16() == 529 => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            _ => Self::HttpResponseError {
                provider,
                status_code,
                message,
            },
        }
    }
}

impl From<AnthropicError> for LanguageModelCompletionError {
    fn from(error: AnthropicError) -> Self {
        let provider = ANTHROPIC_PROVIDER_NAME;
        match error {
            AnthropicError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
            AnthropicError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
            AnthropicError::HttpSend(error) => Self::HttpSend { provider, error },
            AnthropicError::DeserializeResponse(error) => {
                Self::DeserializeResponse { provider, error }
            }
            AnthropicError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
            AnthropicError::HttpResponseError {
                status_code,
                message,
            } => Self::HttpResponseError {
                provider,
                status_code,
                message,
            },
            AnthropicError::RateLimit { retry_after } => Self::RateLimitExceeded {
                provider,
                retry_after: Some(retry_after),
            },
            AnthropicError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            AnthropicError::ApiError(api_error) => api_error.into(),
        }
    }
}

impl From<anthropic::ApiError> for LanguageModelCompletionError {
    fn from(error: anthropic::ApiError) -> Self {
        use anthropic::ApiErrorCode::*;
        let provider = ANTHROPIC_PROVIDER_NAME;
        match error.code() {
            Some(code) => match code {
                InvalidRequestError => Self::BadRequestFormat {
                    provider,
                    message: error.message,
                },
                AuthenticationError => Self::AuthenticationError {
                    provider,
                    message: error.message,
                },
                PermissionError => Self::PermissionError {
                    provider,
                    message: error.message,
                },
                NotFoundError => Self::ApiEndpointNotFound { provider },
                RequestTooLarge => Self::PromptTooLarge {
                    tokens: parse_prompt_too_long(&error.message),
                },
                RateLimitError => Self::RateLimitExceeded {
                    provider,
                    retry_after: None,
                },
                ApiError => Self::ApiInternalServerError {
                    provider,
                    message: error.message,
                },
                OverloadedError => Self::ServerOverloaded {
                    provider,
                    retry_after: None,
                },
            },
            None => Self::Other(error.into()),
        }
    }
}

impl From<open_ai::RequestError> for LanguageModelCompletionError {
    fn from(error: open_ai::RequestError) -> Self {
        match error {
            open_ai::RequestError::HttpResponseError {
                provider,
                status_code,
                body,
                headers,
            } => {
                let retry_after = headers
                    .get(http::header::RETRY_AFTER)
                    .and_then(|val| val.to_str().ok()?.parse::<u64>().ok())
                    .map(Duration::from_secs);

                Self::from_http_status(provider.into(), status_code, body, retry_after)
            }
            open_ai::RequestError::Other(e) => Self::Other(e),
        }
    }
}

impl From<OpenRouterError> for LanguageModelCompletionError {
    fn from(error: OpenRouterError) -> Self {
        let provider = LanguageModelProviderName::new("OpenRouter");
        match error {
            OpenRouterError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
            OpenRouterError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
            OpenRouterError::HttpSend(error) => Self::HttpSend { provider, error },
            OpenRouterError::DeserializeResponse(error) => {
                Self::DeserializeResponse { provider, error }
            }
            OpenRouterError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
            OpenRouterError::RateLimit { retry_after } => Self::RateLimitExceeded {
                provider,
                retry_after: Some(retry_after),
            },
            OpenRouterError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            OpenRouterError::ApiError(api_error) => api_error.into(),
        }
    }
}

impl From<open_router::ApiError> for LanguageModelCompletionError {
    fn from(error: open_router::ApiError) -> Self {
        use open_router::ApiErrorCode::*;
        let provider = LanguageModelProviderName::new("OpenRouter");
        match error.code {
            InvalidRequestError => Self::BadRequestFormat {
                provider,
                message: error.message,
            },
            AuthenticationError => Self::AuthenticationError {
                provider,
                message: error.message,
            },
            PaymentRequiredError => Self::AuthenticationError {
                provider,
                message: format!("Payment required: {}", error.message),
            },
            PermissionError => Self::PermissionError {
                provider,
                message: error.message,
            },
            RequestTimedOut => Self::HttpResponseError {
                provider,
                status_code: StatusCode::REQUEST_TIMEOUT,
                message: error.message,
            },
            RateLimitError => Self::RateLimitExceeded {
                provider,
                retry_after: None,
            },
            ApiError => Self::ApiInternalServerError {
                provider,
                message: error.message,
            },
            OverloadedError => Self::ServerOverloaded {
                provider,
                retry_after: None,
            },
        }
    }
}

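/// The reason a model stopped generating a completion.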
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

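/// Token counts reported by a provider for a single request. Cache creation
/// and cache read tokens are tracked separately from ordinary input tokens.
///
/// A minimal sketch of working with usage values (marked `ignore` since it
/// assumes this crate's types are importable in a doctest):
///
/// ```ignore
/// let first = TokenUsage { input_tokens: 100, output_tokens: 20, ..Default::default() };
/// let second = TokenUsage { input_tokens: 100, output_tokens: 45, ..Default::default() };
/// // `Sub` yields the delta between two cumulative readings:
/// let delta = second - first;
/// assert_eq!(delta.output_tokens, 25);
/// assert_eq!(second.total_tokens(), 145);
/// ```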
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u64,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u64 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

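/// A unique identifier for one tool use requested by a model.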
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

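/// A model's request to invoke a tool, carrying both the raw JSON input
/// string and its parsed value.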
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
    /// Thought signature the model sent us. Some models require that this
    /// signature be preserved and sent back in conversation history for validation.
    pub thought_signature: Option<String>,
}

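/// A text-only view of a completion stream, along with the message ID (if
/// any) and the final token usage.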
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    // Holds the most recently reported token usage; complete once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

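/// A language model exposed to the rest of the app: identity and capability
/// queries, token counting, and streaming completions.
///
/// A sketch of consuming the text-only stream (marked `ignore` since it
/// assumes an async context, a populated `request`, and a
/// `model: Arc<dyn LanguageModel>` obtained elsewhere, e.g. from the registry):
///
/// ```ignore
/// use futures::StreamExt;
///
/// let mut text_stream = model.stream_completion_text(request, &cx).await?;
/// while let Some(chunk) = text_stream.stream.next().await {
///     print!("{}", chunk?);
/// }
/// let usage = *text_stream.last_token_usage.lock();
/// println!("used {} tokens", usage.total_tokens());
/// ```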
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        self.provider_id()
    }
    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        self.provider_name()
    }

    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Returns whether this model supports "burn mode" (`CompletionMode::Max`).
    fn supports_burn_mode(&self) -> bool {
        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> u64;

    /// Returns the maximum token count for this model in burn mode, or
    /// `None` if `supports_burn_mode` is `false`.
    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        None
    }

    fn max_output_tokens(&self) -> Option<u64> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    >;

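    /// Streams just the text of a completion, recording the last reported
    /// token usage and discarding all non-text events.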
    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::Queued { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Started) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdated { .. }) => None,
                                Ok(LanguageModelCompletionEvent::ToolUseLimitReached) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::ReasoningDetails(_)) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                    ..
                                }) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

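    /// Streams a completion and resolves to the first tool use whose input
    /// is complete, or an error if the stream ends without one.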
    fn stream_completion_tool(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelToolUse, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();

            // Iterate through events until we find a complete tool use
            while let Some(event) = events.next().await {
                match event {
                    Ok(LanguageModelCompletionEvent::ToolUse(tool_use))
                        if tool_use.is_input_complete =>
                    {
                        return Ok(tool_use);
                    }
                    Err(err) => {
                        return Err(err);
                    }
                    _ => {}
                }
            }

            // Stream ended without a complete tool use
            Err(LanguageModelCompletionError::Other(anyhow::anyhow!(
                "Stream ended without receiving a complete tool use"
            )))
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

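/// Convenience methods on `dyn LanguageModel` that depend on the completion mode.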
pub trait LanguageModelExt: LanguageModel {
    fn max_token_count_for_mode(&self, mode: CompletionMode) -> u64 {
        match mode {
            CompletionMode::Normal => self.max_token_count(),
            CompletionMode::Max => self
                .max_token_count_in_burn_mode()
                .unwrap_or_else(|| self.max_token_count()),
        }
    }
}
impl LanguageModelExt for dyn LanguageModel {}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("connection refused")]
    ConnectionRefused,
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

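/// A source of language models: enumerates available models, reports
/// authentication state, and builds its configuration UI.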
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(
        &self,
        target_agent: ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView;
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

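/// The agent on whose behalf a provider configuration view is being shown.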
#[derive(Default, Clone)]
pub enum ConfigurationViewTargetAgent {
    #[default]
    ZedAgent,
    Other(SharedString),
}

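/// Where a provider's terms-of-service notice is being displayed.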
#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    TextThreadPopup,
    Configuration,
}

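/// Observable state for a provider, so UI can re-render when the provider's
/// underlying entity changes.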
pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

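/// The identifier of a language model, unique within its provider.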
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

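/// The human-readable name of a language model.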
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

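/// The identifier of a language model provider, e.g. "anthropic".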
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

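/// The human-readable name of a language model provider, e.g. "Anthropic".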
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl LanguageModelProviderId {
    pub const fn new(id: &'static str) -> Self {
        Self(SharedString::new_static(id))
    }
}

impl LanguageModelProviderName {
    pub const fn new(id: &'static str) -> Self {
        Self(SharedString::new_static(id))
    }
}

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl fmt::Display for LanguageModelProviderName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<Arc<str>> for LanguageModelProviderId {
    fn from(value: Arc<str>) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<Arc<str>> for LanguageModelProviderName {
    fn from(value: Arc<str>) -> Self {
        Self(SharedString::from(value))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_from_cloud_failure_with_upstream_http_error() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":503}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
                assert_eq!(provider.0, "anthropic");
            }
            _ => panic!(
                "Expected ServerOverloaded error for 503 status, got: {:?}",
                error
            ),
        }

        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Internal server error","upstream_status":500}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider.0, "anthropic");
                assert_eq!(message, "Internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for 500 status, got: {:?}",
                error
            ),
        }
    }

    #[test]
    fn test_from_cloud_failure_with_standard_format() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_503".to_string(),
            "Service unavailable".to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
                assert_eq!(provider.0, "anthropic");
            }
            _ => panic!("Expected ServerOverloaded error for upstream_http_503"),
        }
    }

    #[test]
    fn test_upstream_http_error_connection_timeout() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":503}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
                assert_eq!(provider.0, "anthropic");
            }
            _ => panic!(
                "Expected ServerOverloaded error for connection timeout with 503 status, got: {:?}",
                error
            ),
        }

        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":500}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider.0, "anthropic");
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected ApiInternalServerError for connection timeout with 500 status, got: {:?}",
                error
            ),
        }
    }

    #[test]
    fn test_language_model_tool_use_serializes_with_signature() {
        use serde_json::json;

        let tool_use = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("test_id"),
            name: "test_tool".into(),
            raw_input: json!({"arg": "value"}).to_string(),
            input: json!({"arg": "value"}),
            is_input_complete: true,
            thought_signature: Some("test_signature".to_string()),
        };

        let serialized = serde_json::to_value(&tool_use).unwrap();

        assert_eq!(serialized["id"], "test_id");
        assert_eq!(serialized["name"], "test_tool");
        assert_eq!(serialized["thought_signature"], "test_signature");
    }

    #[test]
    fn test_language_model_tool_use_deserializes_with_missing_signature() {
        use serde_json::json;

        let json = json!({
            "id": "test_id",
            "name": "test_tool",
            "raw_input": "{\"arg\":\"value\"}",
            "input": {"arg": "value"},
            "is_input_complete": true
        });

        let tool_use: LanguageModelToolUse = serde_json::from_value(json).unwrap();

        assert_eq!(tool_use.id, LanguageModelToolUseId::from("test_id"));
        assert_eq!(tool_use.name.as_ref(), "test_tool");
        assert_eq!(tool_use.thought_signature, None);
    }

    #[test]
    fn test_language_model_tool_use_round_trip_with_signature() {
        use serde_json::json;

        let original = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("round_trip_id"),
            name: "round_trip_tool".into(),
            raw_input: json!({"key": "value"}).to_string(),
            input: json!({"key": "value"}),
            is_input_complete: true,
            thought_signature: Some("round_trip_sig".to_string()),
        };

        let serialized = serde_json::to_value(&original).unwrap();
        let deserialized: LanguageModelToolUse = serde_json::from_value(serialized).unwrap();

        assert_eq!(deserialized.id, original.id);
        assert_eq!(deserialized.name, original.name);
        assert_eq!(deserialized.thought_signature, original.thought_signature);
    }

    #[test]
    fn test_language_model_tool_use_round_trip_without_signature() {
        use serde_json::json;

        let original = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("no_sig_id"),
            name: "no_sig_tool".into(),
            raw_input: json!({"arg": "value"}).to_string(),
            input: json!({"arg": "value"}),
            is_input_complete: true,
            thought_signature: None,
        };

        let serialized = serde_json::to_value(&original).unwrap();
        let deserialized: LanguageModelToolUse = serde_json::from_value(serialized).unwrap();

        assert_eq!(deserialized.id, original.id);
        assert_eq!(deserialized.name, original.name);
        assert_eq!(deserialized.thought_signature, None);
    }
}
1089}