language_model.rs

mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anyhow::Result;
use futures::FutureExt;
use futures::{future::BoxFuture, stream::BoxStream, StreamExt, TryStreamExt as _};
use gpui::{AnyElement, AnyView, AppContext, AsyncAppContext, SharedString, Task, WindowContext};
pub use model::*;
use proto::Plan;
pub use rate_limiter::*;
pub use registry::*;
pub use request::*;
pub use role::*;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::fmt;
use std::{future::Future, sync::Arc};
use ui::IconName;

/// The identifier of the Zed-hosted language model provider.
pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

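/// Initializes the global [`LanguageModelRegistry`].
///
/// A minimal sketch of typical startup wiring (the surrounding `App` setup is illustrative):
///
/// ```ignore
/// use gpui::AppContext;
///
/// gpui::App::new().run(|cx: &mut AppContext| {
///     language_model::init(cx);
///     // ...register language model providers, build UI, etc.
/// });
/// ```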
pub fn init(cx: &mut AppContext) {
    registry::init(cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    /// The maximum number of cache anchors that may be set on a single request.
    pub max_cache_anchors: usize,
    /// Whether to speculatively populate the cache before the request is finalized.
    pub should_speculate: bool,
    /// The minimum total token count below which caching is not used.
    pub min_total_token: usize,
}

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    /// The model stopped emitting tokens for the given reason.
    Stop(StopReason),
    /// A chunk of generated text.
    Text(String),
    /// The model requested that a tool be invoked.
    ToolUse(LanguageModelToolUse),
    /// A new assistant message started, identified by `message_id`.
    StartMessage { message_id: String },
}

/// The reason a completion stream stopped.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    /// The model finished its turn normally.
    EndTurn,
    /// The maximum number of output tokens was reached.
    MaxTokens,
    /// The model stopped in order to use a tool.
    ToolUse,
}

/// A tool invocation requested by the model.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    /// The provider-assigned identifier for this tool use.
    pub id: String,
    /// The name of the tool to invoke.
    pub name: String,
    /// The tool's input arguments, as JSON.
    pub input: serde_json::Value,
}

/// A text-only completion stream, along with the ID of the message it belongs to, if known.
pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
        }
    }
}

/// An interface to a language model served by a [`LanguageModelProvider`].
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    /// Returns this model's icon. If `None`, callers fall back to [`LanguageModelProvider::icon`].
    fn icon(&self) -> Option<IconName> {
        None
    }
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &AppContext) -> Option<String> {
        None
    }

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    /// Returns the maximum number of tokens this model accepts as input.
    fn max_token_count(&self) -> usize;
    /// Returns the maximum number of output tokens this model can produce, if limited.
    fn max_output_tokens(&self) -> Option<u32> {
        None
    }

    /// Counts the tokens that `request` would consume for this model.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &AppContext,
    ) -> BoxFuture<'static, Result<usize>>;

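    /// Streams a completion for `request`, yielding [`LanguageModelCompletionEvent`]s as the
    /// provider produces them.
    ///
    /// A sketch of how a caller might consume the stream (`model`, `request`, and the async
    /// context are assumed to exist):
    ///
    /// ```ignore
    /// let mut events = model.stream_completion(request, &cx).await?;
    /// while let Some(event) = events.next().await {
    ///     match event? {
    ///         LanguageModelCompletionEvent::StartMessage { message_id } => println!("started {message_id}"),
    ///         LanguageModelCompletionEvent::Text(chunk) => print!("{chunk}"),
    ///         LanguageModelCompletionEvent::ToolUse(tool_use) => println!("tool requested: {}", tool_use.name),
    ///         LanguageModelCompletionEvent::Stop(reason) => println!("stopped: {reason:?}"),
    ///     }
    /// }
    /// ```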
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;

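    /// Convenience wrapper around [`Self::stream_completion`] that yields only text chunks,
    /// plus the message ID if the provider reported one; tool-use and stop events are dropped.
    ///
    /// A sketch of consuming the text stream (same assumptions as above):
    ///
    /// ```ignore
    /// let LanguageModelTextStream { message_id, mut stream } = model
    ///     .stream_completion_text(request, &cx)
    ///     .await?;
    /// if let Some(id) = message_id {
    ///     println!("message id: {id}");
    /// }
    /// while let Some(chunk) = stream.next().await {
    ///     print!("{}", chunk?);
    /// }
    /// ```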
    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
        let events = self.stream_completion(request, cx);

        async move {
            let mut events = events.await?;
            let mut message_id = None;
            let mut first_item_text = None;

            // Peek at the first event to capture the message ID (if the provider reports one)
            // or the first chunk of text, so neither is lost when building the text stream.
            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            // Re-emit the buffered first text chunk (if any), then forward only text chunks
            // and errors from the remaining events.
            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map(|result| async move {
                    match result {
                        Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                        Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                        Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                        Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                        Err(err) => Some(Err(err)),
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream { message_id, stream })
        }
        .boxed()
    }

    /// Requests a completion in which the model must call the named tool with arguments
    /// matching `schema`, streaming the tool's input as chunks of JSON text.
    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        name: String,
        description: String,
        schema: serde_json::Value,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;

    /// Returns the prompt-caching configuration for this model, if it supports caching.
    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}

impl dyn LanguageModel {
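    /// Runs `request` constrained to the tool described by `T` and deserializes the complete
    /// response into `T`.
    ///
    /// A sketch of usage with a hypothetical `Summary` tool (the type and its fields are
    /// illustrative, not part of this crate):
    ///
    /// ```ignore
    /// #[derive(serde::Deserialize, schemars::JsonSchema)]
    /// struct Summary {
    ///     headline: String,
    ///     bullet_points: Vec<String>,
    /// }
    ///
    /// impl LanguageModelTool for Summary {
    ///     fn name() -> String {
    ///         "summarize".into()
    ///     }
    ///     fn description() -> String {
    ///         "Summarize the conversation so far".into()
    ///     }
    /// }
    ///
    /// let summary: Summary = model.use_tool::<Summary>(request, &cx).await?;
    /// ```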
    pub fn use_tool<T: LanguageModelTool>(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> impl 'static + Future<Output = Result<T>> {
        // Derive the tool's JSON schema from `T`, then collect the streamed response before
        // deserializing it into `T`.
        let schema = schemars::schema_for!(T);
        let schema_json = serde_json::to_value(&schema).unwrap();
        let stream = self.use_any_tool(request, T::name(), T::description(), schema_json, cx);
        async move {
            let stream = stream.await?;
            let response = stream.try_collect::<String>().await?;
            Ok(serde_json::from_str(&response)?)
        }
    }

    /// Like [`Self::use_tool`], but returns the raw streamed chunks of the tool's JSON input
    /// instead of a deserialized value.
    pub fn use_tool_stream<T: LanguageModelTool>(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        let schema = schemars::schema_for!(T);
        let schema_json = serde_json::to_value(&schema).unwrap();
        self.use_any_tool(request, T::name(), T::description(), schema_json, cx)
    }
}

/// A tool that a language model can be asked to invoke, with its JSON schema derived from the
/// type's [`JsonSchema`] implementation and its result deserialized via [`DeserializeOwned`].
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

/// A source of language models, such as a hosted API or a local runtime.
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    /// Returns the models this provider currently offers.
    fn provided_models(&self, cx: &AppContext) -> Vec<Arc<dyn LanguageModel>>;
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &AppContext) {}
    fn is_authenticated(&self, cx: &AppContext) -> bool;
    fn authenticate(&self, cx: &mut AppContext) -> Task<Result<()>>;
    /// Returns a view for configuring this provider, e.g. for entering credentials.
    fn configuration_view(&self, cx: &mut WindowContext) -> AnyView;
    /// Whether the user must accept the provider's terms of service before using it.
    fn must_accept_terms(&self, _cx: &AppContext) -> bool {
        false
    }
    fn render_accept_terms(&self, _cx: &mut WindowContext) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut AppContext) -> Task<Result<()>>;
}

/// Optional observable state for a [`LanguageModelProvider`], allowing other entities to react
/// when the provider changes (for example, after authentication or a model list refresh).
pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Model<Self::ObservableEntity>>;

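    /// Subscribes to changes in this provider's observable entity, if it has one, invoking
    /// `callback` whenever the entity notifies.
    ///
    /// A sketch of wiring this up from a `ModelContext` (`MyEntity` and `provider` are
    /// illustrative):
    ///
    /// ```ignore
    /// let _subscription = provider.subscribe(cx, |_this: &mut MyEntity, cx| {
    ///     // React to provider state changes, e.g. refresh the list of available models.
    ///     cx.notify();
    /// });
    /// ```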
    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::ModelContext<T>,
        callback: impl Fn(&mut T, &mut gpui::ModelContext<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

/// The unique identifier of a language model.
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelId(pub SharedString);

/// The human-readable name of a language model.
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

/// The unique identifier of a language model provider.
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

/// The human-readable name of a language model provider.
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}