use anyhow::{anyhow, bail, Result};
use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt};
use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
use http_client::HttpClient;
use language_model::{AuthenticateError, LanguageModelCompletionEvent};
use language_model::{
    LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, RateLimiter, Role,
};
use ollama::{
    get_models, preload_model, stream_chat_completion, ChatMessage, ChatOptions, ChatRequest,
    ChatResponseDelta, KeepAlive, OllamaToolCall,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::{collections::BTreeMap, sync::Arc};
use ui::{prelude::*, ButtonLike, Indicator};
use util::ResultExt;

use crate::AllLanguageModelSettings;

const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";
const OLLAMA_SITE: &str = "https://ollama.com/";

const PROVIDER_ID: &str = "ollama";
const PROVIDER_NAME: &str = "Ollama";

#[derive(Default, Debug, Clone, PartialEq)]
pub struct OllamaSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

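/// A model entry configured in Zed's settings, merged with (and taking
/// precedence over) models reported by the Ollama API.
///
/// As a rough sketch (field names mirror this struct; `display_name` and
/// `keep_alive` are optional), an entry might look like:
///
/// ```json
/// {
///   "name": "llama3.2:latest",
///   "display_name": "Llama 3.2",
///   "max_tokens": 16384
/// }
/// ```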
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The context length of the model (Ollama's num_ctx, aka n_ctx)
    pub max_tokens: usize,
    /// How long the model stays loaded in memory after the last request (Ollama's keep_alive)
    pub keep_alive: Option<KeepAlive>,
}

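/// The Ollama provider: a shared HTTP client plus the observable [`State`]
/// entity tracking which models the local Ollama server reports.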
pub struct OllamaLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

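/// Shared provider state: the model list fetched from the Ollama server and a
/// settings subscription that re-fetches it when the configuration changes.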
pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<ollama::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
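    // Ollama has no credentials; a non-empty model list is treated as proof
    // that the local server is reachable.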
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).ollama;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we'll check if it's up by fetching the models
        cx.spawn(|this, mut cx| async move {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<ollama::Model> = models
                .into_iter()
                // Since there is no metadata from the Ollama API
                // indicating which models are embedding models,
                // simply filter out models with "-embed" in their name
                .filter(|model| !model.name.contains("-embed"))
                .map(|model| ollama::Model::new(&model.name, None, None))
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(&mut cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

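    /// For Ollama, "authenticating" just means confirming the server answers a
    /// model-listing request; if models are already loaded this resolves
    /// immediately.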
    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(|_this, _cx| async move { Ok(fetch_models_task.await?) })
    }
}

impl OllamaLanguageModelProvider {
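    /// Creates the provider and immediately starts fetching models. The
    /// settings observer restarts the fetch whenever the Ollama settings
    /// (e.g. the API URL) change.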
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).ollama.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).ollama;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for OllamaLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OllamaLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiOllama
    }

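    // Merging into a BTreeMap keeps the combined list sorted by model name and
    // lets models declared in settings override same-named models reported by
    // the Ollama API.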
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, ollama::Model> = BTreeMap::default();

        // Add models from the Ollama API
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .ollama
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                ollama::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    keep_alive: model.keep_alive.clone(),
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(OllamaLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model: model.clone(),
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

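    // Asks the Ollama server to load the model into memory ahead of time, so
    // the first completion request doesn't pay the model-loading cost.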
    fn load_model(&self, model: Arc<dyn LanguageModel>, cx: &App) {
        let settings = &AllLanguageModelSettings::get_global(cx).ollama;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();
        let id = model.id().0.to_string();
        cx.spawn(|_| async move { preload_model(http_client, &api_url, &id).await })
            .detach_and_log_err(cx);
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct OllamaLanguageModel {
    id: LanguageModelId,
    model: ollama::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OllamaLanguageModel {
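    /// Converts Zed's provider-agnostic request into Ollama's chat format,
    /// flattening each message to its plain-text contents.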
    fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
        ChatRequest {
            model: self.model.name.clone(),
            messages: request
                .messages
                .into_iter()
                .map(|msg| match msg.role {
                    Role::User => ChatMessage::User {
                        content: msg.string_contents(),
                    },
                    Role::Assistant => ChatMessage::Assistant {
                        content: msg.string_contents(),
                        tool_calls: None,
                    },
                    Role::System => ChatMessage::System {
                        content: msg.string_contents(),
                    },
                })
                .collect(),
            keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
            stream: true,
            options: Some(ChatOptions {
                num_ctx: Some(self.model.max_tokens),
                stop: Some(request.stop),
                temperature: request.temperature.or(Some(1.0)),
                ..Default::default()
            }),
            tools: vec![],
        }
    }
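
    /// Sends a chat request and resolves with the complete response; used by
    /// `use_any_tool`, which needs the whole assistant message in order to
    /// inspect its tool calls.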
    fn request_completion(
        &self,
        request: ChatRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<ChatResponseDelta>> {
        let http_client = self.http_client.clone();

        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).ollama;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        async move { ollama::complete(http_client.as_ref(), &api_url, request).await }.boxed()
    }
}

impl LanguageModel for OllamaLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn telemetry_id(&self) -> String {
        format!("ollama/{}", self.model.id())
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        // There is no endpoint for this _yet_ in Ollama
        // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
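        // Instead, fall back to a rough estimate of ~4 characters per token,
        // a common approximation for English text.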
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().chars().count())
            .sum::<usize>()
            / 4;

        async move { Ok(token_count) }.boxed()
    }

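    // Streams a chat completion, mapping each Ollama delta to a plain text
    // event. Requests are throttled through the provider's `RateLimiter`.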
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
        let request = self.to_ollama_request(request);

        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).ollama;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let response = stream_chat_completion(http_client.as_ref(), &api_url, request).await?;
            let stream = response
                .filter_map(|response| async move {
                    match response {
                        Ok(delta) => {
                            let content = match delta.message {
                                ChatMessage::User { content } => content,
                                ChatMessage::Assistant { content, .. } => content,
                                ChatMessage::System { content } => content,
                            };
                            Some(Ok(content))
                        }
                        Err(error) => Some(Err(error)),
                    }
                })
                .boxed();
            Ok(stream)
        });

        async move {
            Ok(future
                .await?
                .map(|result| result.map(LanguageModelCompletionEvent::Text))
                .boxed())
        }
        .boxed()
    }

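    // Runs a request with a single tool attached and returns that tool's
    // arguments as a JSON string. Tool calls are read from one complete
    // response (via `request_completion`) rather than the streaming path.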
    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        tool_name: String,
        tool_description: String,
        schema: serde_json::Value,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        use ollama::{OllamaFunctionTool, OllamaTool};
        let function = OllamaFunctionTool {
            name: tool_name.clone(),
            description: Some(tool_description),
            parameters: Some(schema),
        };
        let tools = vec![OllamaTool::Function { function }];
        let request = self.to_ollama_request(request).with_tools(tools);
        let response = self.request_completion(request, cx);
        self.request_limiter
            .run(async move {
                let response = response.await?;
                let ChatMessage::Assistant { tool_calls, .. } = response.message else {
                    bail!("message does not have an assistant role");
                };
                if let Some(tool_calls) = tool_calls.filter(|calls| !calls.is_empty()) {
                    for call in tool_calls {
                        let OllamaToolCall::Function(function) = call;
                        if function.name == tool_name {
                            return Ok(futures::stream::once(async move {
                                Ok(function.arguments.to_string())
                            })
                            .boxed());
                        }
                    }
                } else {
                    bail!("assistant message does not have any tool calls");
                };

                bail!("tool not used")
            })
            .boxed()
    }
}

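/// The provider's configuration UI: shows install and download guidance while
/// the Ollama server is unreachable, and a "Connected" indicator once models
/// have been fetched.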
struct ConfigurationView {
    state: gpui::Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: gpui::Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let loading_models_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            |this, mut cx| async move {
                if let Some(task) = state
                    .update(&mut cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }
                this.update(&mut cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let ollama_intro = "Get up and running with Llama 3.3, Mistral, Gemma 2, and other large language models with Ollama.";
        let ollama_reqs =
            "Ollama must be running with at least one model installed to use it in the assistant.";

        let inline_code_bg = cx.theme().colors().editor_foreground.opacity(0.05);

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .size_full()
                .gap_3()
                .child(
                    v_flex()
                        .size_full()
                        .gap_2()
                        .p_1()
                        .child(Label::new(ollama_intro))
                        .child(Label::new(ollama_reqs))
                        .child(
                            h_flex()
                                .gap_0p5()
                                .child(Label::new("Once installed, try "))
                                .child(
                                    div()
                                        .bg(inline_code_bg)
                                        .px_1p5()
                                        .rounded_md()
                                        .child(Label::new("ollama run llama3.2")),
                                ),
                        ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .pt_2()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("ollama-site", "Ollama")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::XSmall)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _, cx| cx.open_url(OLLAMA_SITE))
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_ollama_button",
                                                "Download Ollama",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| {
                                                cx.open_url(OLLAMA_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "All Models")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::XSmall)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _, cx| cx.open_url(OLLAMA_LIBRARY_URL)),
                                ),
                        )
                        .child(if is_authenticated {
                            // This is only a button to ensure the spacing is correct;
                            // it should stay disabled
                            ButtonLike::new("connected")
                                .disabled(true)
                                // Since this won't ever be clickable, we can use the arrow cursor
                                .cursor_style(gpui::CursorStyle::Arrow)
                                .child(
                                    h_flex()
                                        .gap_2()
                                        .child(Indicator::dot().color(Color::Success))
                                        .child(Label::new("Connected"))
                                        .into_any_element(),
                                )
                                .into_any_element()
                        } else {
                            Button::new("retry_ollama_models", "Connect")
                                .icon_position(IconPosition::Start)
                                .icon(IconName::ArrowCircle)
                                .on_click(
                                    cx.listener(move |this, _, _, cx| this.retry_connection(cx)),
                                )
                                .into_any_element()
                        }),
                )
                .into_any()
        }
    }
}