mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::Result;
use client::Client;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(client: Arc<Client>, cx: &mut App) {
    registry::init(cx);
    RefreshLlmTokenListener::register(client, cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    /// The maximum number of cache anchors the model supports.
    pub max_cache_anchors: usize,
    /// Whether caching should be performed speculatively.
    pub should_speculate: bool,
    /// The minimum total token count before caching is applied.
    pub min_total_token: usize,
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    /// The model stopped generating, for the given reason.
    Stop(StopReason),
    /// A chunk of response text.
    Text(String),
    /// A chunk of the model's reasoning output.
    Thinking(String),
    /// The model requested the use of a tool.
    ToolUse(LanguageModelToolUse),
    /// A new message began, with the given provider-assigned ID.
    StartMessage { message_id: String },
    /// An update to the token usage for this completion.
    UsageUpdate(TokenUsage),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    /// Returns the sum of all input, output, and cache token counts.
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}
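
// A minimal illustration of `TokenUsage` arithmetic: `Add` accumulates usage
// across requests, and `total_tokens` sums all four counters. Note that the
// `Sub` impl above uses plain subtraction, so subtracting a larger usage will
// panic in debug builds. The numbers below are arbitrary.
#[cfg(test)]
mod token_usage_example {
    use super::*;

    #[test]
    fn accumulate_and_total() {
        let first = TokenUsage {
            input_tokens: 100,
            output_tokens: 50,
            ..Default::default()
        };
        let second = TokenUsage {
            input_tokens: 25,
            output_tokens: 25,
            cache_read_input_tokens: 10,
            ..Default::default()
        };
        let combined = first + second;
        assert_eq!(combined.total_tokens(), 210);
        assert_eq!(combined - second, first);
    }
}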

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub input: serde_json::Value,
}
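
// A small illustrative construction of a tool-use event. The id, tool name,
// and input below are hypothetical; the blanket `From<T: Into<Arc<str>>>` impl
// above lets the id be built directly from a string literal.
#[cfg(test)]
mod tool_use_example {
    use super::*;

    #[test]
    fn build_tool_use() {
        let tool_use = LanguageModelToolUse {
            id: "tool-use-1".into(),
            name: "search".into(),
            input: serde_json::json!({ "query": "fn main" }),
        };
        assert_eq!(tool_use.id.to_string(), "tool-use-1");
    }
}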

pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String>>,
    /// Contains the complete token usage only after the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;

    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let events = self.stream_completion(request, cx);

        async move {
            let mut events = events.await?.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking(_)) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}
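
// A minimal consumer sketch (not part of the original API) showing how
// `stream_completion_text` is meant to be used: drain the text stream, then
// read the final `TokenUsage`, which is only complete once the stream ends.
// Compiled only for tests/test-support; the caller supplies the request and
// async context.
#[cfg(any(test, feature = "test-support"))]
pub async fn collect_completion_text(
    model: &dyn LanguageModel,
    request: LanguageModelRequest,
    cx: &AsyncApp,
) -> Result<(String, TokenUsage)> {
    let mut text_stream = model.stream_completion_text(request, cx).await?;
    let mut text = String::new();
    while let Some(chunk) = text_stream.stream.next().await {
        text.push_str(&chunk?);
    }
    // The usage counters are only fully populated after the stream is drained.
    let usage = *text_stream.last_token_usage.lock();
    Ok((text, usage))
}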

/// Well-known error conditions surfaced by language model backends.
#[derive(Debug, Error)]
pub enum LanguageModelKnownError {
    #[error("Context window limit exceeded ({tokens})")]
    ContextWindowLimitExceeded { tokens: usize },
}

/// A tool that a language model can invoke, with an input schema derived from
/// the implementing type via [`JsonSchema`].
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}
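
// A minimal sketch of a `LanguageModelTool` implementation. The tool, its
// field, and its description are hypothetical; the input schema is derived
// from the struct via `JsonSchema`, and the model's arguments deserialize
// into it via `Deserialize`.
#[cfg(test)]
mod tool_example {
    use super::*;

    #[allow(dead_code)]
    #[derive(Deserialize, JsonSchema)]
    struct SearchTool {
        /// The query to search the project for.
        query: String,
    }

    impl LanguageModelTool for SearchTool {
        fn name() -> String {
            "search".into()
        }

        fn description() -> String {
            "Searches the project for the given query.".into()
        }
    }
}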

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

/// The view from which the provider's terms of service are being accepted.
#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When the Agent Panel thread is empty but there are some past interactions.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

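// A minimal sketch of a `LanguageModelProviderState` implementation; the
// `Provider` and `State` types here are hypothetical. Exposing the provider's
// state entity is all that is needed for the default `subscribe` method to
// observe it.
#[cfg(test)]
mod provider_state_example {
    use super::*;

    #[allow(dead_code)]
    struct State;

    #[allow(dead_code)]
    struct Provider {
        state: gpui::Entity<State>,
    }

    impl LanguageModelProviderState for Provider {
        type ObservableEntity = State;

        fn observable_entity(&self) -> Option<gpui::Entity<State>> {
            Some(self.state.clone())
        }
    }
}
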
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}