mod api_key;
mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;
pub mod tool_schema;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anthropic::{AnthropicError, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::Client;
use cloud_llm_client::{CompletionMode, CompletionRequestStatus};
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::{StatusCode, http};
use icons::IconName;
use open_router::OpenRouterError;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
pub use settings::LanguageModelCacheConfiguration;
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{fmt, io};
use thiserror::Error;
use util::serde::is_default;

pub use crate::api_key::{ApiKey, ApiKeyState};
pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;
pub use crate::tool_schema::LanguageModelToolSchemaFormat;
pub use zed_env_vars::{EnvVar, env_var};

pub const ANTHROPIC_PROVIDER_ID: LanguageModelProviderId =
    LanguageModelProviderId::new("anthropic");
pub const ANTHROPIC_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Anthropic");

pub const GOOGLE_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("google");
pub const GOOGLE_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Google AI");

pub const OPEN_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("openai");
pub const OPEN_AI_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("OpenAI");

pub const X_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("x_ai");
pub const X_AI_PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("xAI");

pub const ZED_CLOUD_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("zed.dev");
pub const ZED_CLOUD_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Zed");

pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client, cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    Queued {
        position: usize,
    },
    Started,
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    RedactedThinking {
        data: String,
    },
    ToolUse(LanguageModelToolUse),
    ToolUseJsonParseError {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    StartMessage {
        message_id: String,
    },
    ReasoningDetails(serde_json::Value),
    UsageUpdate(TokenUsage),
}

impl LanguageModelCompletionEvent {
    pub fn from_completion_request_status(
        status: CompletionRequestStatus,
        upstream_provider: LanguageModelProviderName,
    ) -> Result<Self, LanguageModelCompletionError> {
        match status {
            CompletionRequestStatus::Queued { position } => {
                Ok(LanguageModelCompletionEvent::Queued { position })
            }
            CompletionRequestStatus::Started => Ok(LanguageModelCompletionEvent::Started),
            CompletionRequestStatus::UsageUpdated { .. }
            | CompletionRequestStatus::ToolUseLimitReached => Err(
                LanguageModelCompletionError::Other(anyhow!("Unexpected status: {status:?}")),
            ),
            CompletionRequestStatus::Failed {
                code,
                message,
                request_id: _,
                retry_after,
            } => Err(LanguageModelCompletionError::from_cloud_failure(
                upstream_provider,
                code,
                message,
                retry_after.map(Duration::from_secs_f64),
            )),
        }
    }
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("prompt too large for context window")]
    PromptTooLarge { tokens: Option<u64> },
    #[error("missing {provider} API key")]
    NoApiKey { provider: LanguageModelProviderName },
    #[error("{provider}'s API rate limit exceeded")]
    RateLimitExceeded {
        provider: LanguageModelProviderName,
        retry_after: Option<Duration>,
    },
    #[error("{provider}'s API servers are overloaded right now")]
    ServerOverloaded {
        provider: LanguageModelProviderName,
        retry_after: Option<Duration>,
    },
    #[error("{provider}'s API server reported an internal server error: {message}")]
    ApiInternalServerError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("{message}")]
    UpstreamProviderError {
        message: String,
        status: StatusCode,
        retry_after: Option<Duration>,
    },
    #[error("HTTP response error from {provider}'s API: status {status_code} - {message:?}")]
    HttpResponseError {
        provider: LanguageModelProviderName,
        status_code: StatusCode,
        message: String,
    },

    // Client errors
    #[error("invalid request format to {provider}'s API: {message}")]
    BadRequestFormat {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("authentication error with {provider}'s API: {message}")]
    AuthenticationError {
        provider: LanguageModelProviderName,
        message: String,
    },
177 #[error("Permission error with {provider}'s API: {message}")]
178 PermissionError {
179 provider: LanguageModelProviderName,
180 message: String,
181 },
182 #[error("language model provider API endpoint not found")]
183 ApiEndpointNotFound { provider: LanguageModelProviderName },
184 #[error("I/O error reading response from {provider}'s API")]
185 ApiReadResponseError {
186 provider: LanguageModelProviderName,
187 #[source]
188 error: io::Error,
189 },
190 #[error("error serializing request to {provider} API")]
191 SerializeRequest {
192 provider: LanguageModelProviderName,
193 #[source]
194 error: serde_json::Error,
195 },
196 #[error("error building request body to {provider} API")]
197 BuildRequestBody {
198 provider: LanguageModelProviderName,
199 #[source]
200 error: http::Error,
201 },
202 #[error("error sending HTTP request to {provider} API")]
203 HttpSend {
204 provider: LanguageModelProviderName,
205 #[source]
206 error: anyhow::Error,
207 },
208 #[error("error deserializing {provider} API response")]
209 DeserializeResponse {
210 provider: LanguageModelProviderName,
211 #[source]
212 error: serde_json::Error,
213 },
214
215 // TODO: Ideally this would be removed in favor of having a comprehensive list of errors.
216 #[error(transparent)]
217 Other(#[from] anyhow::Error),
218}

impl LanguageModelCompletionError {
    fn parse_upstream_error_json(message: &str) -> Option<(StatusCode, String)> {
        let error_json = serde_json::from_str::<serde_json::Value>(message).ok()?;
        let upstream_status = error_json
            .get("upstream_status")
            .and_then(|v| v.as_u64())
            .and_then(|status| u16::try_from(status).ok())
            .and_then(|status| StatusCode::from_u16(status).ok())?;
        let inner_message = error_json
            .get("message")
            .and_then(|v| v.as_str())
            .unwrap_or(message)
            .to_string();
        Some((upstream_status, inner_message))
    }

    pub fn from_cloud_failure(
        upstream_provider: LanguageModelProviderName,
        code: String,
        message: String,
        retry_after: Option<Duration>,
    ) -> Self {
        if let Some(tokens) = parse_prompt_too_long(&message) {
            // TODO: currently Anthropic PAYLOAD_TOO_LARGE response may cause INTERNAL_SERVER_ERROR
            // to be reported. This is a temporary workaround to handle this in the case where the
            // token limit has been exceeded.
            Self::PromptTooLarge {
                tokens: Some(tokens),
            }
        } else if code == "upstream_http_error" {
            if let Some((upstream_status, inner_message)) =
                Self::parse_upstream_error_json(&message)
            {
                return Self::from_http_status(
                    upstream_provider,
                    upstream_status,
                    inner_message,
                    retry_after,
                );
            }
            anyhow!("completion request failed, code: {code}, message: {message}").into()
        } else if let Some(status_code) = code
            .strip_prefix("upstream_http_")
            .and_then(|code| StatusCode::from_str(code).ok())
        {
            Self::from_http_status(upstream_provider, status_code, message, retry_after)
        } else if let Some(status_code) = code
            .strip_prefix("http_")
            .and_then(|code| StatusCode::from_str(code).ok())
        {
            Self::from_http_status(ZED_CLOUD_PROVIDER_NAME, status_code, message, retry_after)
        } else {
            anyhow!("completion request failed, code: {code}, message: {message}").into()
        }
    }

    pub fn from_http_status(
        provider: LanguageModelProviderName,
        status_code: StatusCode,
        message: String,
        retry_after: Option<Duration>,
    ) -> Self {
        match status_code {
            StatusCode::BAD_REQUEST => Self::BadRequestFormat { provider, message },
            StatusCode::UNAUTHORIZED => Self::AuthenticationError { provider, message },
            StatusCode::FORBIDDEN => Self::PermissionError { provider, message },
            StatusCode::NOT_FOUND => Self::ApiEndpointNotFound { provider },
            StatusCode::PAYLOAD_TOO_LARGE => Self::PromptTooLarge {
                tokens: parse_prompt_too_long(&message),
            },
            StatusCode::TOO_MANY_REQUESTS => Self::RateLimitExceeded {
                provider,
                retry_after,
            },
            StatusCode::INTERNAL_SERVER_ERROR => Self::ApiInternalServerError { provider, message },
            StatusCode::SERVICE_UNAVAILABLE => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            _ if status_code.as_u16() == 529 => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            _ => Self::HttpResponseError {
                provider,
                status_code,
                message,
            },
        }
    }
}

impl From<AnthropicError> for LanguageModelCompletionError {
    fn from(error: AnthropicError) -> Self {
        let provider = ANTHROPIC_PROVIDER_NAME;
        match error {
            AnthropicError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
            AnthropicError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
            AnthropicError::HttpSend(error) => Self::HttpSend { provider, error },
            AnthropicError::DeserializeResponse(error) => {
                Self::DeserializeResponse { provider, error }
            }
            AnthropicError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
            AnthropicError::HttpResponseError {
                status_code,
                message,
            } => Self::HttpResponseError {
                provider,
                status_code,
                message,
            },
            AnthropicError::RateLimit { retry_after } => Self::RateLimitExceeded {
                provider,
                retry_after: Some(retry_after),
            },
            AnthropicError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            AnthropicError::ApiError(api_error) => api_error.into(),
        }
    }
}

impl From<anthropic::ApiError> for LanguageModelCompletionError {
    fn from(error: anthropic::ApiError) -> Self {
        use anthropic::ApiErrorCode::*;
        let provider = ANTHROPIC_PROVIDER_NAME;
        match error.code() {
            Some(code) => match code {
                InvalidRequestError => Self::BadRequestFormat {
                    provider,
                    message: error.message,
                },
                AuthenticationError => Self::AuthenticationError {
                    provider,
                    message: error.message,
                },
                PermissionError => Self::PermissionError {
                    provider,
                    message: error.message,
                },
                NotFoundError => Self::ApiEndpointNotFound { provider },
                RequestTooLarge => Self::PromptTooLarge {
                    tokens: parse_prompt_too_long(&error.message),
                },
                RateLimitError => Self::RateLimitExceeded {
                    provider,
                    retry_after: None,
                },
                ApiError => Self::ApiInternalServerError {
                    provider,
                    message: error.message,
                },
                OverloadedError => Self::ServerOverloaded {
                    provider,
                    retry_after: None,
                },
            },
            None => Self::Other(error.into()),
        }
    }
}

impl From<open_ai::RequestError> for LanguageModelCompletionError {
    fn from(error: open_ai::RequestError) -> Self {
        match error {
            open_ai::RequestError::HttpResponseError {
                provider,
                status_code,
                body,
                headers,
            } => {
                let retry_after = headers
                    .get(http::header::RETRY_AFTER)
                    .and_then(|val| val.to_str().ok()?.parse::<u64>().ok())
                    .map(Duration::from_secs);

                Self::from_http_status(provider.into(), status_code, body, retry_after)
            }
            open_ai::RequestError::Other(e) => Self::Other(e),
        }
    }
}

impl From<OpenRouterError> for LanguageModelCompletionError {
    fn from(error: OpenRouterError) -> Self {
        let provider = LanguageModelProviderName::new("OpenRouter");
        match error {
            OpenRouterError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
            OpenRouterError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
            OpenRouterError::HttpSend(error) => Self::HttpSend { provider, error },
            OpenRouterError::DeserializeResponse(error) => {
                Self::DeserializeResponse { provider, error }
            }
            OpenRouterError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
            OpenRouterError::RateLimit { retry_after } => Self::RateLimitExceeded {
                provider,
                retry_after: Some(retry_after),
            },
            OpenRouterError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            OpenRouterError::ApiError(api_error) => api_error.into(),
        }
    }
}

impl From<open_router::ApiError> for LanguageModelCompletionError {
    fn from(error: open_router::ApiError) -> Self {
        use open_router::ApiErrorCode::*;
        let provider = LanguageModelProviderName::new("OpenRouter");
        match error.code {
            InvalidRequestError => Self::BadRequestFormat {
                provider,
                message: error.message,
            },
            AuthenticationError => Self::AuthenticationError {
                provider,
                message: error.message,
            },
            PaymentRequiredError => Self::AuthenticationError {
                provider,
                message: format!("Payment required: {}", error.message),
            },
            PermissionError => Self::PermissionError {
                provider,
                message: error.message,
            },
            RequestTimedOut => Self::HttpResponseError {
                provider,
                status_code: StatusCode::REQUEST_TIMEOUT,
                message: error.message,
            },
            RateLimitError => Self::RateLimitExceeded {
                provider,
                retry_after: None,
            },
            ApiError => Self::ApiInternalServerError {
                provider,
                message: error.message,
            },
            OverloadedError => Self::ServerOverloaded {
                provider,
                retry_after: None,
            },
        }
    }
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u64,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u64 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
    /// Thought signature the model sent us. Some models require that this
    /// signature be preserved and sent back in conversation history for validation.
    pub thought_signature: Option<String>,
}

pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    // Holds the complete token usage after the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        self.provider_id()
    }
    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        self.provider_name()
    }

    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Returns whether this model supports "burn mode".
    fn supports_burn_mode(&self) -> bool {
        false
    }

    /// Returns whether this model or provider supports streaming tool calls.
    fn supports_streaming_tools(&self) -> bool {
        false
    }

    /// Returns whether this model/provider reports accurate split input/output token counts.
    /// When true, the UI may show separate input/output token indicators.
    fn supports_split_token_display(&self) -> bool {
        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> u64;
    /// Returns the maximum token count for this model in burn mode (if `supports_burn_mode` is `false`, this returns `None`).
    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        None
    }
    fn max_output_tokens(&self) -> Option<u64> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    >;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::Queued { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Started) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::ReasoningDetails(_)) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                    ..
                                }) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn stream_completion_tool(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelToolUse, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();

            // Iterate through events until we find a complete ToolUse
            while let Some(event) = events.next().await {
                match event {
                    Ok(LanguageModelCompletionEvent::ToolUse(tool_use))
                        if tool_use.is_input_complete =>
                    {
                        return Ok(tool_use);
                    }
                    Err(err) => {
                        return Err(err);
                    }
                    _ => {}
                }
            }

            // Stream ended without a complete tool use
            Err(LanguageModelCompletionError::Other(anyhow::anyhow!(
                "Stream ended without receiving a complete tool use"
            )))
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

pub trait LanguageModelExt: LanguageModel {
    fn max_token_count_for_mode(&self, mode: CompletionMode) -> u64 {
        match mode {
            CompletionMode::Normal => self.max_token_count(),
            CompletionMode::Max => self
                .max_token_count_in_burn_mode()
                .unwrap_or_else(|| self.max_token_count()),
        }
    }
}
impl LanguageModelExt for dyn LanguageModel {}

impl std::fmt::Debug for dyn LanguageModel {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("<dyn LanguageModel>")
            .field("id", &self.id())
            .field("name", &self.name())
            .field("provider_id", &self.provider_id())
            .field("provider_name", &self.provider_name())
            .field("upstream_provider_name", &self.upstream_provider_name())
            .field("upstream_provider_id", &self.upstream_provider_id())
780 .field("upstream_provider_id", &self.upstream_provider_id())
781 .field("supports_streaming_tools", &self.supports_streaming_tools())
782 .finish()
783 }
784}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("connection refused")]
    ConnectionRefused,
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Either a built-in icon name or a path to an external SVG.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IconOrSvg {
    /// A built-in icon from Zed's icon set.
    Icon(IconName),
    /// Path to a custom SVG icon file.
    Svg(SharedString),
}

impl Default for IconOrSvg {
    fn default() -> Self {
        Self::Icon(IconName::ZedAssistant)
    }
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconOrSvg {
        IconOrSvg::default()
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(
        &self,
        target_agent: ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView;
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(Default, Clone, PartialEq, Eq)]
pub enum ConfigurationViewTargetAgent {
    #[default]
    ZedAgent,
    Other(SharedString),
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    TextThreadPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl LanguageModelProviderId {
    pub const fn new(id: &'static str) -> Self {
        Self(SharedString::new_static(id))
    }
}

impl LanguageModelProviderName {
    pub const fn new(id: &'static str) -> Self {
        Self(SharedString::new_static(id))
    }
}

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl fmt::Display for LanguageModelProviderName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<Arc<str>> for LanguageModelProviderId {
    fn from(value: Arc<str>) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<Arc<str>> for LanguageModelProviderName {
    fn from(value: Arc<str>) -> Self {
        Self(SharedString::from(value))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_from_cloud_failure_with_upstream_http_error() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":503}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
                assert_eq!(provider.0, "anthropic");
            }
            _ => panic!(
                "Expected ServerOverloaded error for 503 status, got: {:?}",
                error
            ),
        }

        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Internal server error","upstream_status":500}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider.0, "anthropic");
                assert_eq!(message, "Internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for 500 status, got: {:?}",
                error
            ),
        }
    }
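
    // Plain `http_`-prefixed failure codes describe Zed's cloud itself rather
    // than the upstream model provider, so the error should be attributed to
    // the Zed provider. A small added sanity check on that code path.
    #[test]
    fn test_from_cloud_failure_with_cloud_http_code() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "http_429".to_string(),
            "Too many requests".to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::RateLimitExceeded { provider, .. } => {
                assert_eq!(provider, ZED_CLOUD_PROVIDER_NAME);
            }
            _ => panic!("Expected RateLimitExceeded attributed to Zed, got: {:?}", error),
        }
    }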

    #[test]
    fn test_from_cloud_failure_with_standard_format() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_503".to_string(),
            "Service unavailable".to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
                assert_eq!(provider.0, "anthropic");
            }
            _ => panic!("Expected ServerOverloaded error for upstream_http_503"),
        }
    }
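
    // Exercises the private `parse_upstream_error_json` helper directly: it
    // should pull `upstream_status` and `message` out of the payload, and fall
    // back to the whole string when no `message` field is present.
    #[test]
    fn test_parse_upstream_error_json() {
        let (status, message) = LanguageModelCompletionError::parse_upstream_error_json(
            r#"{"code":"upstream_http_error","message":"boom","upstream_status":500}"#,
        )
        .unwrap();
        assert_eq!(status, StatusCode::INTERNAL_SERVER_ERROR);
        assert_eq!(message, "boom");

        let (status, message) = LanguageModelCompletionError::parse_upstream_error_json(
            r#"{"upstream_status":503}"#,
        )
        .unwrap();
        assert_eq!(status, StatusCode::SERVICE_UNAVAILABLE);
        assert_eq!(message, r#"{"upstream_status":503}"#);

        // Not JSON at all: no upstream status can be recovered.
        assert!(LanguageModelCompletionError::parse_upstream_error_json("plain text").is_none());
    }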

    #[test]
    fn test_upstream_http_error_connection_timeout() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":503}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
                assert_eq!(provider.0, "anthropic");
            }
            _ => panic!(
                "Expected ServerOverloaded error for connection timeout with 503 status, got: {:?}",
                error
            ),
        }

        let error = LanguageModelCompletionError::from_cloud_failure(
            String::from("anthropic").into(),
            "upstream_http_error".to_string(),
            r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":500}"#.to_string(),
            None,
        );

        match error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider.0, "anthropic");
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected ApiInternalServerError for connection timeout with 500 status, got: {:?}",
                error
            ),
        }
    }
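
    // `from_http_status` treats both 503 and the nonstandard 529 status (used
    // by some providers to signal overload) as `ServerOverloaded`, carrying
    // the retry hint through.
    #[test]
    fn test_from_http_status_overloaded_statuses() {
        for status in [
            StatusCode::SERVICE_UNAVAILABLE,
            StatusCode::from_u16(529).unwrap(),
        ] {
            let error = LanguageModelCompletionError::from_http_status(
                ANTHROPIC_PROVIDER_NAME,
                status,
                "overloaded".to_string(),
                Some(Duration::from_secs(10)),
            );
            match error {
                LanguageModelCompletionError::ServerOverloaded {
                    provider,
                    retry_after,
                } => {
                    assert_eq!(provider, ANTHROPIC_PROVIDER_NAME);
                    assert_eq!(retry_after, Some(Duration::from_secs(10)));
                }
                _ => panic!("Expected ServerOverloaded for {status}, got: {:?}", error),
            }
        }
    }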

    #[test]
    fn test_language_model_tool_use_serializes_with_signature() {
        use serde_json::json;

        let tool_use = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("test_id"),
            name: "test_tool".into(),
            raw_input: json!({"arg": "value"}).to_string(),
            input: json!({"arg": "value"}),
            is_input_complete: true,
            thought_signature: Some("test_signature".to_string()),
        };

        let serialized = serde_json::to_value(&tool_use).unwrap();

        assert_eq!(serialized["id"], "test_id");
        assert_eq!(serialized["name"], "test_tool");
        assert_eq!(serialized["thought_signature"], "test_signature");
    }
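
    // `StopReason` is serialized with `rename_all = "snake_case"`; a quick
    // round trip guards that wire convention.
    #[test]
    fn test_stop_reason_round_trips_as_snake_case() {
        use serde_json::json;

        assert_eq!(
            serde_json::to_value(StopReason::EndTurn).unwrap(),
            json!("end_turn")
        );
        assert_eq!(
            serde_json::from_value::<StopReason>(json!("tool_use")).unwrap(),
            StopReason::ToolUse
        );
    }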

    #[test]
    fn test_language_model_tool_use_deserializes_with_missing_signature() {
        use serde_json::json;

        let json = json!({
            "id": "test_id",
            "name": "test_tool",
            "raw_input": "{\"arg\":\"value\"}",
            "input": {"arg": "value"},
            "is_input_complete": true
        });

        let tool_use: LanguageModelToolUse = serde_json::from_value(json).unwrap();

        assert_eq!(tool_use.id, LanguageModelToolUseId::from("test_id"));
        assert_eq!(tool_use.name.as_ref(), "test_tool");
        assert_eq!(tool_use.thought_signature, None);
    }

    #[test]
    fn test_language_model_tool_use_round_trip_with_signature() {
        use serde_json::json;

        let original = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("round_trip_id"),
            name: "round_trip_tool".into(),
            raw_input: json!({"key": "value"}).to_string(),
            input: json!({"key": "value"}),
            is_input_complete: true,
            thought_signature: Some("round_trip_sig".to_string()),
        };

        let serialized = serde_json::to_value(&original).unwrap();
        let deserialized: LanguageModelToolUse = serde_json::from_value(serialized).unwrap();

        assert_eq!(deserialized.id, original.id);
        assert_eq!(deserialized.name, original.name);
        assert_eq!(deserialized.thought_signature, original.thought_signature);
    }

    #[test]
    fn test_language_model_tool_use_round_trip_without_signature() {
        use serde_json::json;

        let original = LanguageModelToolUse {
            id: LanguageModelToolUseId::from("no_sig_id"),
            name: "no_sig_tool".into(),
            raw_input: json!({"arg": "value"}).to_string(),
            input: json!({"arg": "value"}),
            is_input_complete: true,
            thought_signature: None,
        };

        let serialized = serde_json::to_value(&original).unwrap();
        let deserialized: LanguageModelToolUse = serde_json::from_value(serialized).unwrap();

        assert_eq!(deserialized.id, original.id);
        assert_eq!(deserialized.name, original.name);
        assert_eq!(deserialized.thought_signature, None);
    }
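
    // `total_tokens` sums all four counters and `Add`/`Sub` operate field-wise;
    // a small worked example with made-up counts.
    #[test]
    fn test_token_usage_arithmetic() {
        let before = TokenUsage {
            input_tokens: 10,
            output_tokens: 5,
            cache_creation_input_tokens: 2,
            cache_read_input_tokens: 3,
        };
        let delta = TokenUsage {
            input_tokens: 1,
            output_tokens: 2,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 4,
        };

        let after = before + delta;
        assert_eq!(after.total_tokens(), 27);
        assert_eq!(after - delta, before);
    }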
}