//! language_model.rs — language model traits, completion events, and identifier newtypes.

  1pub mod logging;
  2mod model;
  3pub mod provider;
  4mod rate_limiter;
  5mod registry;
  6mod request;
  7mod role;
  8pub mod settings;
  9
 10use anyhow::Result;
 11use client::{Client, UserStore};
 12use futures::FutureExt;
 13use futures::{future::BoxFuture, stream::BoxStream, StreamExt, TryStreamExt as _};
 14use gpui::{
 15    AnyElement, AnyView, AppContext, AsyncAppContext, Model, SharedString, Task, WindowContext,
 16};
 17pub use model::*;
 18use project::Fs;
 19use proto::Plan;
 20pub(crate) use rate_limiter::*;
 21pub use registry::*;
 22pub use request::*;
 23pub use role::*;
 24use schemars::JsonSchema;
 25use serde::{de::DeserializeOwned, Deserialize, Serialize};
 26use std::fmt;
 27use std::{future::Future, sync::Arc};
 28use ui::IconName;
 29
/// Initializes the language-model subsystem: registers its settings and
/// the model registry in the global [`AppContext`].
///
/// NOTE(review): `settings::init` runs before `registry::init`; the registry
/// presumably reads these settings, so keep this ordering — confirm.
pub fn init(
    user_store: Model<UserStore>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    cx: &mut AppContext,
) {
    settings::init(fs, cx);
    registry::init(user_store, client, cx);
}
 39
/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available only to users on the indicated plan.
    RequiresPlan(Plan),
}
 48
/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    // Upper bound on the number of cache anchors — presumably cache
    // breakpoints within the prompt; confirm against provider implementations.
    pub max_cache_anchors: usize,
    // Whether the provider should speculatively write to the cache.
    pub should_speculate: bool,
    // Minimum token count below which caching is not applied — TODO confirm.
    pub min_total_token: usize,
}
 56
/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    /// The model stopped emitting output, with the reason why.
    Stop(StopReason),
    /// A chunk of completion text.
    Text(String),
    /// The model requested the use of a tool.
    ToolUse(LanguageModelToolUse),
    /// A new message began; carries the message's identifier.
    StartMessage { message_id: String },
}
 65
/// Why a language model stopped producing output.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    /// The model finished its turn normally.
    EndTurn,
    /// The output token limit was reached.
    MaxTokens,
    /// The model stopped in order to use a tool.
    ToolUse,
}
 73
/// A single tool invocation requested by a language model.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    // Identifier for this particular tool-use, assigned by the model/provider.
    pub id: String,
    // Name of the tool being invoked.
    pub name: String,
    // Tool input as arbitrary JSON (expected to match the tool's schema).
    pub input: serde_json::Value,
}
 80
/// A text-only view of a completion stream, as produced by
/// [`LanguageModel::stream_completion_text`].
pub struct LanguageModelTextStream {
    // Message id captured from a leading `StartMessage` event, when present.
    pub message_id: Option<String>,
    // Stream of text chunks; errors from the underlying event stream pass through.
    pub stream: BoxStream<'static, Result<String>>,
}
 85
 86impl Default for LanguageModelTextStream {
 87    fn default() -> Self {
 88        Self {
 89            message_id: None,
 90            stream: Box::pin(futures::stream::empty()),
 91        }
 92    }
 93}
 94
 95pub trait LanguageModel: Send + Sync {
 96    fn id(&self) -> LanguageModelId;
 97    fn name(&self) -> LanguageModelName;
 98    /// If None, falls back to [LanguageModelProvider::icon]
 99    fn icon(&self) -> Option<IconName> {
100        None
101    }
102    fn provider_id(&self) -> LanguageModelProviderId;
103    fn provider_name(&self) -> LanguageModelProviderName;
104    fn telemetry_id(&self) -> String;
105
106    fn api_key(&self, _cx: &AppContext) -> Option<String> {
107        None
108    }
109
110    /// Returns the availability of this language model.
111    fn availability(&self) -> LanguageModelAvailability {
112        LanguageModelAvailability::Public
113    }
114
115    fn max_token_count(&self) -> usize;
116    fn max_output_tokens(&self) -> Option<u32> {
117        None
118    }
119
120    fn count_tokens(
121        &self,
122        request: LanguageModelRequest,
123        cx: &AppContext,
124    ) -> BoxFuture<'static, Result<usize>>;
125
126    fn stream_completion(
127        &self,
128        request: LanguageModelRequest,
129        cx: &AsyncAppContext,
130    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>>;
131
132    fn stream_completion_text(
133        &self,
134        request: LanguageModelRequest,
135        cx: &AsyncAppContext,
136    ) -> BoxFuture<'static, Result<LanguageModelTextStream>> {
137        let events = self.stream_completion(request, cx);
138
139        async move {
140            let mut events = events.await?;
141            let mut message_id = None;
142            let mut first_item_text = None;
143
144            if let Some(first_event) = events.next().await {
145                match first_event {
146                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
147                        message_id = Some(id.clone());
148                    }
149                    Ok(LanguageModelCompletionEvent::Text(text)) => {
150                        first_item_text = Some(text);
151                    }
152                    _ => (),
153                }
154            }
155
156            let stream = futures::stream::iter(first_item_text.map(Ok))
157                .chain(events.filter_map(|result| async move {
158                    match result {
159                        Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
160                        Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
161                        Ok(LanguageModelCompletionEvent::Stop(_)) => None,
162                        Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
163                        Err(err) => Some(Err(err)),
164                    }
165                }))
166                .boxed();
167
168            Ok(LanguageModelTextStream { message_id, stream })
169        }
170        .boxed()
171    }
172
173    fn use_any_tool(
174        &self,
175        request: LanguageModelRequest,
176        name: String,
177        description: String,
178        schema: serde_json::Value,
179        cx: &AsyncAppContext,
180    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;
181
182    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
183        None
184    }
185
186    #[cfg(any(test, feature = "test-support"))]
187    fn as_fake(&self) -> &provider::fake::FakeLanguageModel {
188        unimplemented!()
189    }
190}
191
192impl dyn LanguageModel {
193    pub fn use_tool<T: LanguageModelTool>(
194        &self,
195        request: LanguageModelRequest,
196        cx: &AsyncAppContext,
197    ) -> impl 'static + Future<Output = Result<T>> {
198        let schema = schemars::schema_for!(T);
199        let schema_json = serde_json::to_value(&schema).unwrap();
200        let stream = self.use_any_tool(request, T::name(), T::description(), schema_json, cx);
201        async move {
202            let stream = stream.await?;
203            let response = stream.try_collect::<String>().await?;
204            Ok(serde_json::from_str(&response)?)
205        }
206    }
207
208    pub fn use_tool_stream<T: LanguageModelTool>(
209        &self,
210        request: LanguageModelRequest,
211        cx: &AsyncAppContext,
212    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
213        let schema = schemars::schema_for!(T);
214        let schema_json = serde_json::to_value(&schema).unwrap();
215        self.use_any_tool(request, T::name(), T::description(), schema_json, cx)
216    }
217}
218
/// A typed tool that a language model can be asked to use; the tool's input
/// schema is derived from `T`'s [`JsonSchema`] implementation and its output
/// is deserialized into `T`.
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    /// The tool's name, as presented to the model.
    fn name() -> String;
    /// A description of the tool, as presented to the model.
    fn description() -> String;
}
223
/// A source of language models (e.g. one backend/service), responsible for
/// authentication, configuration UI, and enumerating its models.
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    /// Icon shown for this provider; models without their own icon fall
    /// back to this (see [`LanguageModel::icon`]).
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    /// All models this provider currently offers.
    fn provided_models(&self, cx: &AppContext) -> Vec<Arc<dyn LanguageModel>>;
    /// Optional hook to eagerly load a model; default is a no-op.
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &AppContext) {}
    fn is_authenticated(&self, cx: &AppContext) -> bool;
    fn authenticate(&self, cx: &mut AppContext) -> Task<Result<()>>;
    /// Builds the provider's configuration UI.
    fn configuration_view(&self, cx: &mut WindowContext) -> AnyView;
    /// Whether the user still has to accept terms of service before use.
    fn must_accept_terms(&self, _cx: &AppContext) -> bool {
        false
    }
    /// Renders the terms-of-service acceptance UI, if any.
    fn render_accept_terms(&self, _cx: &mut WindowContext) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut AppContext) -> Task<Result<()>>;
}
243
244pub trait LanguageModelProviderState: 'static {
245    type ObservableEntity;
246
247    fn observable_entity(&self) -> Option<gpui::Model<Self::ObservableEntity>>;
248
249    fn subscribe<T: 'static>(
250        &self,
251        cx: &mut gpui::ModelContext<T>,
252        callback: impl Fn(&mut T, &mut gpui::ModelContext<T>) + 'static,
253    ) -> Option<gpui::Subscription> {
254        let entity = self.observable_entity()?;
255        Some(cx.observe(&entity, move |this, _, cx| {
256            callback(this, cx);
257        }))
258    }
259}
260
/// Unique identifier of a [`LanguageModel`].
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelId(pub SharedString);

/// Human-readable name of a [`LanguageModel`].
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

/// Unique identifier of a [`LanguageModelProvider`].
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

/// Human-readable name of a [`LanguageModelProvider`].
#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);
272
273impl fmt::Display for LanguageModelProviderId {
274    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
275        write!(f, "{}", self.0)
276    }
277}
278
279impl From<String> for LanguageModelId {
280    fn from(value: String) -> Self {
281        Self(SharedString::from(value))
282    }
283}
284
285impl From<String> for LanguageModelName {
286    fn from(value: String) -> Self {
287        Self(SharedString::from(value))
288    }
289}
290
291impl From<String> for LanguageModelProviderId {
292    fn from(value: String) -> Self {
293        Self(SharedString::from(value))
294    }
295}
296
297impl From<String> for LanguageModelProviderName {
298    fn from(value: String) -> Self {
299        Self(SharedString::from(value))
300    }
301}