mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::Result;
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use icons::IconName;
use parking_lot::Mutex;
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt;
use std::ops::{Add, Sub};
use std::sync::Arc;
use thiserror::Error;
use util::serde::is_default;

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(client: Arc<Client>, cx: &mut App) {
    registry::init(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: usize,
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    Stop(StopReason),
    Text(String),
    Thinking(String),
    ToolUse(LanguageModelToolUse),
    StartMessage { message_id: String },
    UsageUpdate(TokenUsage),
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see <https://json-schema.org>.
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see <https://ai.google.dev/api/caching#Schema>.
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u32,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u32,
}

impl TokenUsage {
    /// Returns the sum of input and output tokens. Cache creation and cache
    /// read counts are tracked separately and are not included.
    pub fn total_tokens(&self) -> u32 {
        self.input_tokens + self.output_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    // Note: this is plain field-wise `u32` subtraction, so it underflows
    // (panicking in debug builds) if any field of `other` exceeds `self`'s.
    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}
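
// A small sanity-check example (illustrative, not part of the public API):
// per-turn usage deltas can be accumulated with the `Add` impl above, and
// `total_tokens` reports the input/output sum only.
#[cfg(test)]
mod token_usage_example {
    use super::TokenUsage;

    #[test]
    fn accumulates_per_turn_usage() {
        let turn = TokenUsage {
            input_tokens: 120,
            output_tokens: 40,
            ..Default::default()
        };
        let total = TokenUsage::default() + turn.clone();
        assert_eq!(total.total_tokens(), 160);
        assert_eq!((total - turn).total_tokens(), 0);
    }
}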

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub input: serde_json::Value,
}

pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String>>,
    /// Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}

pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    /// Returns this model's icon. If `None`, falls back to [`LanguageModelProvider::icon`].
    fn icon(&self) -> Option<IconName> {
        None
    }
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> usize;
    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;

    /// Streams a completion as plain text, discarding thinking, tool use, and
    /// stop events. The first event is inspected up front so the resulting
    /// stream can carry a `message_id` when one is provided.
    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let events = self.stream_completion(request, cx);

        async move {
            let mut events = events.await?.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking(_)) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}
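
// Hedged sketch (a helper added for illustration, not part of the original
// API): drains `stream_completion_text` into a single string. Note the final
// `TokenUsage` is only complete once the stream is exhausted.
#[allow(dead_code)]
async fn collect_completion_text(
    model: &dyn LanguageModel,
    request: LanguageModelRequest,
    cx: &AsyncApp,
) -> Result<(String, TokenUsage)> {
    let mut text_stream = model.stream_completion_text(request, cx).await?;
    let mut text = String::new();
    while let Some(chunk) = text_stream.stream.next().await {
        text.push_str(&chunk?);
    }
    let usage = text_stream.last_token_usage.lock().clone();
    Ok((text, usage))
}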

pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}
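
// Hedged example of a `LanguageModelTool` input type; `FindReferencesTool`
// and its fields are hypothetical. Deriving `Deserialize` and `JsonSchema`
// satisfies the trait bounds, and the field doc comments become descriptions
// in the generated input schema.
#[allow(dead_code)]
#[derive(Deserialize, JsonSchema)]
struct FindReferencesTool {
    /// The symbol to find references for.
    symbol: String,
    /// The maximum number of results to return.
    max_results: Option<usize>,
}

impl LanguageModelTool for FindReferencesTool {
    fn name() -> String {
        "find_references".into()
    }

    fn description() -> String {
        "Finds references to a symbol in the current project.".into()
    }
}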
289
290/// An error that occurred when trying to authenticate the language model provider.
291#[derive(Debug, Error)]
292pub enum AuthenticateError {
293 #[error("credentials not found")]
294 CredentialsNotFound,
295 #[error(transparent)]
296 Other(#[from] anyhow::Error),
297}
298
299pub trait LanguageModelProvider: 'static {
300 fn id(&self) -> LanguageModelProviderId;
301 fn name(&self) -> LanguageModelProviderName;
302 fn icon(&self) -> IconName {
303 IconName::ZedAssistant
304 }
305 fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
306 fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
307 fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &App) {}
308 fn is_authenticated(&self, cx: &App) -> bool;
309 fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
310 fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
311 fn must_accept_terms(&self, _cx: &App) -> bool {
312 false
313 }
314 fn render_accept_terms(
315 &self,
316 _view: LanguageModelProviderTosView,
317 _cx: &mut App,
318 ) -> Option<AnyElement> {
319 None
320 }
321 fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
322}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}
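
// Hedged usage sketch for `subscribe`; `ExampleConfigurationView` is a
// hypothetical view type. The returned `Subscription` must be retained for
// as long as the observation should stay alive; dropping it cancels the
// callback.
#[allow(dead_code)]
struct ExampleConfigurationView {
    _provider_subscription: Option<gpui::Subscription>,
}

#[allow(dead_code)]
fn observe_provider_state<P: LanguageModelProviderState>(
    provider: &P,
    cx: &mut gpui::Context<ExampleConfigurationView>,
) -> Option<gpui::Subscription> {
    // Re-render the view whenever the provider's observable entity changes.
    provider.subscribe(cx, |_this, cx| cx.notify())
}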

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}