mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Result, anyhow};
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    CompletionRequestStatus, MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME,
    MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

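/// Initializes this crate: registers its settings and the LLM token refresh
/// listener. A hedged call-site sketch (assumes application startup code that
/// already owns a `client` and an `App` context `cx`):
///
/// ```ignore
/// language_model::init(client.clone(), cx);
/// ```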
pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client, cx);
}

/// Registers this crate's settings.
pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// Configuration for caching language model messages.
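///
/// A hypothetical construction sketch (the field values are illustrative,
/// not real provider limits):
///
/// ```ignore
/// let cache_config = LanguageModelCacheConfiguration {
///     max_cache_anchors: 4,
///     should_speculate: true,
///     min_total_token: 1024,
/// };
/// ```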
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: usize,
}

/// A completion event from a language model.
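///
/// Consumers typically match on each event as it streams in; a minimal
/// sketch (`buffer` is a hypothetical caller-owned `String`):
///
/// ```ignore
/// match event {
///     LanguageModelCompletionEvent::Text(chunk) => buffer.push_str(&chunk),
///     LanguageModelCompletionEvent::Stop(reason) => println!("stopped: {reason:?}"),
///     _ => {}
/// }
/// ```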
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    StatusUpdate(CompletionRequestStatus),
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    ToolUse(LanguageModelToolUse),
    StartMessage {
        message_id: String,
    },
    UsageUpdate(TokenUsage),
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("received bad input JSON")]
    BadInputJson {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI,
    /// see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

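/// The reason a model stopped generating. Serialized in snake_case, so e.g.
/// `StopReason::EndTurn` round-trips as `"end_turn"`; a minimal sketch:
///
/// ```ignore
/// let json = serde_json::to_string(&StopReason::EndTurn)?;
/// assert_eq!(json, r#""end_turn""#);
/// ```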
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
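    /// Parses request usage from response headers.
    ///
    /// A hedged sketch of the expected wire format (the header names come
    /// from `zed_llm_client`; the values shown are illustrative):
    ///
    /// ```ignore
    /// let mut headers = HeaderMap::new();
    /// headers.insert(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, HeaderValue::from_static("100"));
    /// headers.insert(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, HeaderValue::from_static("42"));
    /// let usage = RequestUsage::from_headers(&headers)?;
    /// assert_eq!(usage.amount, 42);
    /// ```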
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header"))?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header"))?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
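    /// Returns the sum of all token counts, including cache reads and writes.
    ///
    /// A minimal sketch with hypothetical counts (`TokenUsage` also supports
    /// field-wise `+` and `-` via the `Add`/`Sub` impls below):
    ///
    /// ```ignore
    /// let a = TokenUsage { input_tokens: 10, output_tokens: 5, ..Default::default() };
    /// let b = TokenUsage { input_tokens: 3, output_tokens: 2, ..Default::default() };
    /// assert_eq!((a + b).total_tokens(), 20);
    /// ```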
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    // Field-wise subtraction. Like the `Add` impl above, this uses plain
    // arithmetic, so subtracting a larger usage from a smaller one panics on
    // underflow in debug builds.
    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

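/// An opaque identifier for a tool use, cheaply cloneable via `Arc<str>`.
///
/// Any string-like value converts into one; an illustrative sketch:
///
/// ```ignore
/// let id: LanguageModelToolUseId = "toolu_123".into();
/// assert_eq!(id.to_string(), "toolu_123");
/// ```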
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

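/// A tool call emitted by the model. While arguments are still streaming in,
/// `is_input_complete` is false and `input` may hold a partial parse of
/// `raw_input`.
///
/// An illustrative final state (the values are hypothetical):
///
/// ```ignore
/// let tool_use = LanguageModelToolUse {
///     id: "toolu_123".into(),
///     name: "search".into(),
///     raw_input: r#"{"query":"foo"}"#.to_string(),
///     input: serde_json::json!({ "query": "foo" }),
///     is_input_complete: true,
/// };
/// ```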
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

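/// A text-only view over a completion stream, as produced by
/// [`LanguageModel::stream_completion_text`].
///
/// A hedged consumption sketch (assumes an async context with a `model` and
/// `request` already in hand):
///
/// ```ignore
/// let mut text_stream = model.stream_completion_text(request, &cx).await?;
/// while let Some(chunk) = text_stream.stream.next().await {
///     print!("{}", chunk?);
/// }
/// let usage = *text_stream.last_token_usage.lock();
/// ```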
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    /// Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

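/// An interface to a single model exposed by a [`LanguageModelProvider`].
///
/// A hedged token-counting sketch (assumes an async context and a `request`
/// that has already been built):
///
/// ```ignore
/// let tokens = model.count_tokens(request, cx).await?;
/// if tokens > model.max_token_count() {
///     // Trim the request before streaming.
/// }
/// ```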
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Returns whether this model supports "max mode". Only the Zed-hosted
    /// Anthropic models listed below currently qualify.
    fn supports_max_mode(&self) -> bool {
        if self.provider_id().0 != ZED_CLOUD_PROVIDER_ID {
            return false;
        }

        const MAX_MODE_CAPABLE_MODELS: &[CloudModel] = &[
            CloudModel::Anthropic(anthropic::Model::ClaudeOpus4),
            CloudModel::Anthropic(anthropic::Model::ClaudeOpus4Thinking),
            CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4),
            CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4Thinking),
            CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
        ];

        MAX_MODE_CAPABLE_MODELS
            .iter()
            .any(|model| self.id().0 == model.id())
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;
    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    >;

    /// Streams a completion as plain text, discarding all non-text events
    /// except usage updates, which are recorded in `last_token_usage`.
    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            // Peek at the first event to capture the message ID, if any.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    // Surface an error on the first event instead of
                    // silently dropping it.
                    Err(err) => return Err(err.into()),
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StatusUpdate(_)) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

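/// A typed tool definition whose input schema is derived from the
/// implementing type via [`JsonSchema`].
///
/// A hypothetical implementation sketch:
///
/// ```ignore
/// #[derive(serde::Deserialize, schemars::JsonSchema)]
/// struct SearchTool {
///     query: String,
/// }
///
/// impl LanguageModelTool for SearchTool {
///     fn name() -> String {
///         "search".into()
///     }
///
///     fn description() -> String {
///         "Searches the project for a query".into()
///     }
/// }
/// ```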
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

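/// A source of language models, e.g. a cloud API or a local runtime.
///
/// A hedged usage sketch (assumes a `provider` and an `App` context `cx`):
///
/// ```ignore
/// if !provider.is_authenticated(cx) {
///     let auth = provider.authenticate(cx); // Task<Result<(), AuthenticateError>>
///     // Drive `auth` to completion before listing models.
/// }
/// let models = provider.provided_models(cx);
/// ```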
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

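    /// Observes the provider's entity, invoking `callback` whenever it
    /// changes; returns `None` when the provider has nothing to observe.
    ///
    /// A hedged usage sketch (assumes some `gpui::Context<MyView>`):
    ///
    /// ```ignore
    /// let _subscription = provider.subscribe(cx, |_this: &mut MyView, cx| {
    ///     cx.notify();
    /// });
    /// ```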
    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

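/// A unique identifier for a model within its provider.
///
/// The identifier newtypes below all wrap a [`SharedString`] and convert
/// from `String`; an illustrative sketch (the model name is hypothetical):
///
/// ```ignore
/// let id = LanguageModelId::from("claude-3-7-sonnet".to_string());
/// ```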
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}