mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::{Result, anyhow};
use client::Client;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::http::{HeaderMap, HeaderValue};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr as _;
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{
    CompletionRequestStatus, MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME,
    MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, UsageLimit,
};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

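/// Initializes this crate: registers settings and the LLM token refresh
/// listener. A minimal sketch of the call during app startup (the crate name
/// `language_model` is assumed here):
///
/// ```ignore
/// language_model::init(client.clone(), cx);
/// ```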
pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client, cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    /// The maximum number of cache anchors supported for a request.
    pub max_cache_anchors: usize,
    /// Whether to speculatively set cache anchors before the full request is known.
    pub should_speculate: bool,
    /// The minimum total token count a request must reach before caching is worthwhile.
    pub min_total_token: usize,
}

/// A completion event from a language model.
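///
/// Events arrive on the stream returned by [`LanguageModel::stream_completion`].
/// A minimal sketch of consuming them (hypothetical `model`, `request`, and
/// `cx` values):
///
/// ```ignore
/// let mut events = model.stream_completion(request, &cx).await?;
/// while let Some(event) = events.next().await {
///     match event? {
///         LanguageModelCompletionEvent::Text(chunk) => print!("{chunk}"),
///         LanguageModelCompletionEvent::Stop(reason) => println!("\nstopped: {reason:?}"),
///         _ => {}
///     }
/// }
/// ```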
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    StatusUpdate(CompletionRequestStatus),
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    ToolUse(LanguageModelToolUse),
    StartMessage {
        message_id: String,
    },
    UsageUpdate(TokenUsage),
}

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("received bad input JSON")]
    BadInputJson {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}

#[derive(Debug, Clone, Copy)]
pub struct RequestUsage {
    pub limit: UsageLimit,
    pub amount: i32,
}

impl RequestUsage {
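    /// Parses usage information from the usage limit and usage amount response
    /// headers. A minimal sketch of expected usage (the header values shown are
    /// hypothetical):
    ///
    /// ```ignore
    /// let mut headers = HeaderMap::new();
    /// headers.insert(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME, "500".parse()?);
    /// headers.insert(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME, "42".parse()?);
    /// let usage = RequestUsage::from_headers(&headers)?;
    /// assert_eq!(usage.amount, 42);
    /// ```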
    pub fn from_headers(headers: &HeaderMap<HeaderValue>) -> Result<Self> {
        let limit = headers
            .get(MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_LIMIT_HEADER_NAME:?} header"))?;
        let limit = UsageLimit::from_str(limit.to_str()?)?;

        let amount = headers
            .get(MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME)
            .ok_or_else(|| anyhow!("missing {MODEL_REQUESTS_USAGE_AMOUNT_HEADER_NAME:?} header"))?;
        let amount = amount.to_str()?.parse::<i32>()?;

        Ok(Self { limit, amount })
    }
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
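    /// Returns the total number of tokens across all categories.
    ///
    /// A small sketch of how usage values combine (hypothetical numbers), using
    /// the `Add` impl below:
    ///
    /// ```ignore
    /// let a = TokenUsage { input_tokens: 10, output_tokens: 5, ..Default::default() };
    /// let b = TokenUsage { input_tokens: 3, ..Default::default() };
    /// assert_eq!((a + b).total_tokens(), 18);
    /// ```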
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    /// The raw JSON input as received from the model, which may be incomplete while streaming.
    pub raw_input: String,
    /// The parsed form of `raw_input`.
    pub input: serde_json::Value,
    /// Whether the model has finished streaming this tool use's input.
    pub is_input_complete: bool,
}

pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    /// Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

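/// A handle to a language model that can count tokens and stream completions.
///
/// A minimal sketch of draining the plain-text stream (hypothetical `model`,
/// `request`, and `cx` values):
///
/// ```ignore
/// let text_stream = model.stream_completion_text(request, &cx).await?;
/// let mut chunks = text_stream.stream;
/// while let Some(chunk) = chunks.next().await {
///     print!("{}", chunk?);
/// }
/// println!("\ntotal tokens: {}", text_stream.last_token_usage.lock().total_tokens());
/// ```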
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Returns whether this model supports "max mode".
    fn supports_max_mode(&self) -> bool {
        if self.provider_id().0 != ZED_CLOUD_PROVIDER_ID {
            return false;
        }

        const MAX_MODE_CAPABLE_MODELS: &[CloudModel] = &[
            CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
        ];

        for model in MAX_MODE_CAPABLE_MODELS {
            if self.id().0 == model.id() {
                return true;
            }
        }

        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;

    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    >;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StatusUpdate(_)) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

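/// A tool that a language model can invoke, described to the model by the JSON
/// schema derived from the implementing type.
///
/// A minimal sketch of a hypothetical implementation:
///
/// ```ignore
/// #[derive(serde::Deserialize, schemars::JsonSchema)]
/// struct SearchTool {
///     /// The query to search for.
///     query: String,
/// }
///
/// impl LanguageModelTool for SearchTool {
///     fn name() -> String {
///         "search".into()
///     }
///
///     fn description() -> String {
///         "Searches the project for a query.".into()
///     }
/// }
/// ```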
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

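/// A source of language models, responsible for authentication, model listing,
/// and configuration UI.
///
/// A minimal sketch of checking authentication before listing models
/// (hypothetical `provider` value):
///
/// ```ignore
/// if provider.is_authenticated(cx) {
///     let models = provider.provided_models(cx);
/// } else {
///     let task = provider.authenticate(cx);
///     // Await the task before using the provider.
/// }
/// ```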
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

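    /// Observes the provider's state entity, if it has one, invoking `callback`
    /// whenever that entity changes. A minimal usage sketch (hypothetical
    /// `MyView` type):
    ///
    /// ```ignore
    /// let _subscription = provider.subscribe(cx, |this: &mut MyView, cx| {
    ///     cx.notify();
    /// });
    /// ```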
    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}