use anyhow::{Result, anyhow};
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionEvent, LanguageModelId,
    LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest, RateLimiter,
    Role,
};
use ollama::{
    ChatMessage, ChatOptions, ChatRequest, KeepAlive, get_models, preload_model,
    stream_chat_completion,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::{collections::BTreeMap, sync::Arc};
use ui::{ButtonLike, Indicator, List, prelude::*};
use util::ResultExt;

use crate::AllLanguageModelSettings;
use crate::ui::InstructionListItem;

const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";
const OLLAMA_SITE: &str = "https://ollama.com/";

const PROVIDER_ID: &str = "ollama";
const PROVIDER_NAME: &str = "Ollama";

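/// Settings for the Ollama provider: the base URL of the Ollama server and
/// any models the user has declared explicitly in their settings file.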
#[derive(Default, Debug, Clone, PartialEq)]
pub struct OllamaSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

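/// A model entry declared in the user's settings. Entries here are merged
/// over the models reported by the Ollama API (see `provided_models`).
///
/// A minimal sketch of the expected settings shape, assuming these entries
/// live under the provider's `available_models` key:
///
/// ```json
/// {
///   "ollama": {
///     "available_models": [
///       {
///         "name": "llama3.2:latest",
///         "display_name": "Llama 3.2",
///         "max_tokens": 8192
///       }
///     ]
///   }
/// }
/// ```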
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The model's context length (the `num_ctx`/`n_ctx` parameter)
    pub max_tokens: usize,
    /// The number of seconds to keep the connection open after the last request
    pub keep_alive: Option<KeepAlive>,
}

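/// The Ollama provider: holds the HTTP client used to talk to the local
/// server and the shared [`State`] entity observed by the UI.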
pub struct OllamaLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

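/// Shared provider state: the model list fetched from the server, the task
/// that refreshes it, and a subscription that re-fetches when settings change.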
pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<ollama::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

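    /// Fetches the list of installed models from the Ollama server (its
    /// `/api/tags` endpoint), filters out embedding models, and stores the
    /// rest sorted by name.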
    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).ollama;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we'll check if it's up by fetching the models
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<ollama::Model> = models
                .into_iter()
                // Since there is no metadata from the Ollama API
                // indicating which models are embedding models,
                // simply filter out models with "-embed" in their name
                .filter(|model| !model.name.contains("-embed"))
                .map(|model| ollama::Model::new(&model.name, None, None))
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(async move |_this, _cx| Ok(fetch_models_task.await?))
    }
}

impl OllamaLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).ollama.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).ollama;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for OllamaLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OllamaLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiOllama
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        self.provided_models(cx).into_iter().next()
    }

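    /// Merges models discovered from the server with models declared in
    /// settings; a settings entry with the same name overrides the server's.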
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, ollama::Model> = BTreeMap::default();

        // Add models from the Ollama API
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .ollama
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                ollama::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    keep_alive: model.keep_alive.clone(),
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(OllamaLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

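    /// Asks the Ollama server to load the model into memory ahead of time so
    /// the first completion doesn't pay the model-load latency.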
    fn load_model(&self, model: Arc<dyn LanguageModel>, cx: &App) {
        let settings = &AllLanguageModelSettings::get_global(cx).ollama;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();
        let id = model.id().0.to_string();
        cx.spawn(async move |_| preload_model(http_client, &api_url, &id).await)
            .detach_and_log_err(cx);
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct OllamaLanguageModel {
    id: LanguageModelId,
    model: ollama::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OllamaLanguageModel {
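    /// Converts Zed's provider-agnostic request into an Ollama `ChatRequest`,
    /// flattening each message to plain text. Tool calls aren't forwarded;
    /// `supports_tools` returns false for this provider.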
    fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
        ChatRequest {
            model: self.model.name.clone(),
            messages: request
                .messages
                .into_iter()
                .map(|msg| match msg.role {
                    Role::User => ChatMessage::User {
                        content: msg.string_contents(),
                    },
                    Role::Assistant => ChatMessage::Assistant {
                        content: msg.string_contents(),
                        tool_calls: None,
                    },
                    Role::System => ChatMessage::System {
                        content: msg.string_contents(),
                    },
                })
                .collect(),
            keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
            stream: true,
            options: Some(ChatOptions {
                num_ctx: Some(self.model.max_tokens),
                stop: Some(request.stop),
                temperature: request.temperature.or(Some(1.0)),
                ..Default::default()
            }),
            tools: vec![],
        }
    }
}

impl LanguageModel for OllamaLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        false
    }

    fn telemetry_id(&self) -> String {
        format!("ollama/{}", self.model.id())
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

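    /// Estimates token count as `total characters / 4`, a rough heuristic for
    /// English text, since Ollama exposes no tokenization endpoint yet.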
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        // There is no endpoint for this _yet_ in Ollama
        // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().chars().count())
            .sum::<usize>()
            / 4;

        async move { Ok(token_count) }.boxed()
    }

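    /// Streams a chat completion from the server, mapping each delta's
    /// message content to a `LanguageModelCompletionEvent::Text`. Requests
    /// are gated through the per-model `RateLimiter` (constructed with a
    /// limit of 4).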
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
        let request = self.to_ollama_request(request);

        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).ollama;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let response = stream_chat_completion(http_client.as_ref(), &api_url, request).await?;
            let stream = response
                .filter_map(|response| async move {
                    match response {
                        Ok(delta) => {
                            // Take the text content of the delta regardless of
                            // the role reported on the message.
                            let content = match delta.message {
                                ChatMessage::User { content } => content,
                                ChatMessage::Assistant { content, .. } => content,
                                ChatMessage::System { content } => content,
                            };
                            Some(Ok(content))
                        }
                        Err(error) => Some(Err(error)),
                    }
                })
                .boxed();
            Ok(stream)
        });

        async move {
            Ok(future
                .await?
                .map(|result| result.map(LanguageModelCompletionEvent::Text))
                .boxed())
        }
        .boxed()
    }
}

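/// The provider's configuration UI: shows install instructions and a
/// download link until a model list has been fetched, then a "Connected"
/// indicator.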
struct ConfigurationView {
    state: gpui::Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: gpui::Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let loading_models_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }
                this.update(cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let ollama_intro =
            "Get up & running with Llama 3.3, Mistral, Gemma 2, and other LLMs with Ollama.";

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .gap_2()
                .child(
                    v_flex().gap_1().child(Label::new(ollama_intro)).child(
                        List::new()
                            .child(InstructionListItem::text_only(
                                "Ollama must be running with at least one model installed to use it in the assistant.",
                            ))
                            .child(InstructionListItem::text_only(
                                "Once installed, try `ollama run llama3.2`",
                            )),
                    ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("ollama-site", "Ollama")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::XSmall)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _, cx| cx.open_url(OLLAMA_SITE))
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_ollama_button",
                                                "Download Ollama",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| {
                                                cx.open_url(OLLAMA_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "All Models")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::XSmall)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _, cx| cx.open_url(OLLAMA_LIBRARY_URL)),
                                ),
                        )
                        .map(|this| {
                            if is_authenticated {
                                this.child(
                                    ButtonLike::new("connected")
                                        .disabled(true)
                                        .cursor_style(gpui::CursorStyle::Arrow)
                                        .child(
                                            h_flex()
                                                .gap_2()
                                                .child(Indicator::dot().color(Color::Success))
                                                .child(Label::new("Connected"))
                                                .into_any_element(),
                                        ),
                                )
                            } else {
                                this.child(
                                    Button::new("retry_ollama_models", "Connect")
                                        .icon_position(IconPosition::Start)
                                        .icon_size(IconSize::XSmall)
                                        .icon(IconName::Play)
                                        .on_click(cx.listener(move |this, _, _, cx| {
                                            this.retry_connection(cx)
                                        })),
                                )
                            }
                        }),
                )
                .into_any()
        }
    }
}