mod model;
pub mod provider;
mod rate_limiter;
mod registry;
mod request;
mod role;
pub mod settings;

use anyhow::Result;
use client::{Client, UserStore};
use futures::{future::BoxFuture, stream::BoxStream, TryStreamExt as _};
use gpui::{
    AnyElement, AnyView, AppContext, AsyncAppContext, Model, SharedString, Task, WindowContext,
};
pub use model::*;
use project::Fs;
use proto::Plan;
pub(crate) use rate_limiter::*;
pub use registry::*;
pub use request::*;
pub use role::*;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{future::Future, sync::Arc};
use ui::IconName;

pub fn init(
    user_store: Model<UserStore>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    cx: &mut AppContext,
) {
    settings::init(fs, cx);
    registry::init(user_store, client, cx);
}

/// The availability of a [`LanguageModel`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LanguageModelAvailability {
    /// The language model is available to the general public.
    Public,
    /// The language model is available to users on the indicated plan.
    RequiresPlan(Plan),
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: usize,
}

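/// An interface to a single language model exposed by a [`LanguageModelProvider`].
///
/// A rough sketch of streaming a completion through this trait; `model`,
/// `request`, and `cx` are assumed to be in scope, and request construction is
/// elided (see [`LanguageModelRequest`] for its fields):
///
/// ```ignore
/// use futures::TryStreamExt as _;
///
/// let mut chunks = model.stream_completion(request, &cx).await?;
/// while let Some(chunk) = chunks.try_next().await? {
///     // Each chunk is a piece of the streamed completion text.
///     print!("{chunk}");
/// }
/// ```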
pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn telemetry_id(&self) -> String;

    /// Returns the availability of this language model.
    fn availability(&self) -> LanguageModelAvailability {
        LanguageModelAvailability::Public
    }

    fn max_token_count(&self) -> usize;

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &AppContext,
    ) -> BoxFuture<'static, Result<usize>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;

    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        name: String,
        description: String,
        schema: serde_json::Value,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &provider::fake::FakeLanguageModel {
        unimplemented!()
    }
}

impl dyn LanguageModel {
    /// Asks the model to invoke the tool `T`: generates a JSON schema for `T`,
    /// forwards it to [`LanguageModel::use_any_tool`], and deserializes the
    /// collected response into `T`.
    pub fn use_tool<T: LanguageModelTool>(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> impl 'static + Future<Output = Result<T>> {
        let schema = schemars::schema_for!(T);
        let schema_json = serde_json::to_value(&schema).unwrap();
        let stream = self.use_any_tool(request, T::name(), T::description(), schema_json, cx);
        async move {
            let stream = stream.await?;
            let response = stream.try_collect::<String>().await?;
            Ok(serde_json::from_str(&response)?)
        }
    }

    /// Like [`Self::use_tool`], but returns the raw response stream instead of
    /// deserializing the complete response.
    pub fn use_tool_stream<T: LanguageModelTool>(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        let schema = schemars::schema_for!(T);
        let schema_json = serde_json::to_value(&schema).unwrap();
        self.use_any_tool(request, T::name(), T::description(), schema_json, cx)
    }
}

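/// A typed tool that a language model can be asked to invoke via
/// [`LanguageModel::use_any_tool`] or the `use_tool` helpers above.
///
/// A minimal sketch of an implementation; the tool shown here is purely
/// illustrative and not part of this crate, and `model`, `request`, and `cx`
/// are assumed to be in scope:
///
/// ```ignore
/// #[derive(serde::Deserialize, schemars::JsonSchema)]
/// struct FileSummary {
///     /// Path of the file being summarized.
///     path: String,
///     /// A one-paragraph summary of the file's contents.
///     summary: String,
/// }
///
/// impl LanguageModelTool for FileSummary {
///     fn name() -> String {
///         "summarize_file".into()
///     }
///
///     fn description() -> String {
///         "Summarize the contents of a file".into()
///     }
/// }
///
/// // The model can then be asked to produce a `FileSummary` directly:
/// let summary = model.use_tool::<FileSummary>(request, &cx).await?;
/// ```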
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}

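/// A source of language models.
///
/// A rough sketch of listing a provider's models, authenticating first if
/// necessary; `provider` and `cx` are assumed to be in scope:
///
/// ```ignore
/// if !provider.is_authenticated(cx) {
///     provider.authenticate(cx).detach();
/// }
/// let models = provider.provided_models(cx);
/// ```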
pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn provided_models(&self, cx: &AppContext) -> Vec<Arc<dyn LanguageModel>>;
    fn load_model(&self, _model: Arc<dyn LanguageModel>, _cx: &AppContext) {}
    fn is_authenticated(&self, cx: &AppContext) -> bool;
    fn authenticate(&self, cx: &mut AppContext) -> Task<Result<()>>;
    fn configuration_view(&self, cx: &mut WindowContext) -> AnyView;
    fn must_accept_terms(&self, _cx: &AppContext) -> bool {
        false
    }
    fn render_accept_terms(&self, _cx: &mut WindowContext) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut AppContext) -> Task<Result<()>>;
}

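/// State held by a [`LanguageModelProvider`] that other GPUI entities can
/// observe for changes.
///
/// A minimal sketch of reacting to provider state changes from another model;
/// `provider_state`, `MyView`, and `cx` are illustrative and assumed to be in
/// scope, and the returned subscription must be stored to keep observing:
///
/// ```ignore
/// let _subscription = provider_state.subscribe(cx, |this: &mut MyView, cx| {
///     // Re-render whenever the provider's observable entity changes.
///     cx.notify();
/// });
/// ```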
pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Model<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::ModelContext<T>,
        callback: impl Fn(&mut T, &mut gpui::ModelContext<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}