From 7c11b03838d14a949f382337a91156b18580c9ec Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 3 Feb 2026 21:24:30 -0700 Subject: [PATCH] Eagerly connect to Ollama (#48318) Previously, Ollama would not show up as available until you opened some UI that prompted connecting to Ollama, like the agent panel or the edit prediction settings. Release Notes: - N/A --- Cargo.lock | 1 - crates/edit_prediction/src/ollama.rs | 27 ++++++++++++++++--- .../src/edit_prediction_button.rs | 1 + crates/settings_ui/Cargo.toml | 1 - .../src/components/ollama_model_picker.rs | 25 +---------------- 5 files changed, 26 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72e72da8290872318da251914d6cf9d794ee130b..0587c5a0a45c448f6f340ba4ff1cbe9502cb54ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15215,7 +15215,6 @@ dependencies = [ "heck 0.5.0", "itertools 0.14.0", "language", - "language_model", "language_models", "log", "menu", diff --git a/crates/edit_prediction/src/ollama.rs b/crates/edit_prediction/src/ollama.rs index 5aa3ad20d5c61f50b6e669ebfe795ca6bff13a92..91d3b542b1dfc717ba9435ff035359a3c784dced 100644 --- a/crates/edit_prediction/src/ollama.rs +++ b/crates/edit_prediction/src/ollama.rs @@ -8,7 +8,7 @@ use crate::{ }; use anyhow::{Context as _, Result}; use futures::AsyncReadExt as _; -use gpui::{App, AppContext as _, Entity, Task, http_client}; +use gpui::{App, AppContext as _, Entity, SharedString, Task, http_client}; use language::{ Anchor, Buffer, BufferSnapshot, OffsetRangeExt as _, ToOffset, ToPoint as _, language_settings::all_language_settings, @@ -51,13 +51,34 @@ struct OllamaGenerateResponse { response: String, } +const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("ollama"); + pub fn is_available(cx: &App) -> bool { - let ollama_provider_id = LanguageModelProviderId::new("ollama"); LanguageModelRegistry::read_global(cx) - .provider(&ollama_provider_id) + .provider(&PROVIDER_ID) .is_some_and(|provider| 
provider.is_authenticated(cx)) } +pub fn ensure_authenticated(cx: &mut App) { + if let Some(provider) = LanguageModelRegistry::read_global(cx).provider(&PROVIDER_ID) { + provider.authenticate(cx).detach_and_log_err(cx); + } +} + +pub fn fetch_models(cx: &mut App) -> Vec<SharedString> { + let Some(provider) = LanguageModelRegistry::read_global(cx).provider(&PROVIDER_ID) else { + return Vec::new(); + }; + provider.authenticate(cx).detach_and_log_err(cx); + let mut models: Vec<SharedString> = provider + .provided_models(cx) + .into_iter() + .map(|model| SharedString::from(model.id().0.to_string())) + .collect(); + models.sort(); + models +} + /// Output from the Ollama HTTP request, containing all data needed to create the prediction result. struct OllamaRequestOutput { created_at: String, diff --git a/crates/edit_prediction_ui/src/edit_prediction_button.rs b/crates/edit_prediction_ui/src/edit_prediction_button.rs index 1087264825d0887080f2942943760046b5185c22..153732afafd95eba01ed6aaabd68766920705d9d 100644 --- a/crates/edit_prediction_ui/src/edit_prediction_button.rs +++ b/crates/edit_prediction_ui/src/edit_prediction_button.rs @@ -560,6 +560,7 @@ impl EditPredictionButton { cx.observe_global::<SettingsStore>(move |_, cx| cx.notify()) .detach(); + edit_prediction::ollama::ensure_authenticated(cx); let sweep_api_token_task = edit_prediction::sweep_ai::load_sweep_api_token(cx); let mercury_api_token_task = edit_prediction::mercury::load_mercury_api_token(cx); diff --git a/crates/settings_ui/Cargo.toml b/crates/settings_ui/Cargo.toml index 1a8984d50e07d8c8254a5d4ba8b0a9596f5ea1c6..81fb7503697f33c4aa3716807369a5327c32e114 100644 --- a/crates/settings_ui/Cargo.toml +++ b/crates/settings_ui/Cargo.toml @@ -31,7 +31,6 @@ fuzzy.workspace = true gpui.workspace = true heck.workspace = true itertools.workspace = true -language_model.workspace = true language_models.workspace = true language.workspace = true log.workspace = true diff --git a/crates/settings_ui/src/components/ollama_model_picker.rs 
b/crates/settings_ui/src/components/ollama_model_picker.rs index fbd571009fdce71cb635234b9421256c2f6bca58..268bf196bce3d0fc16a20eac5974fe78ec8532fd 100644 --- a/crates/settings_ui/src/components/ollama_model_picker.rs +++ b/crates/settings_ui/src/components/ollama_model_picker.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use fuzzy::StringMatch; use gpui::{AnyElement, App, Context, DismissEvent, ReadGlobal, SharedString, Task, Window, px}; -use language_model::{LanguageModelProviderId, LanguageModelRegistry}; use picker::{Picker, PickerDelegate}; use settings::SettingsStore; use ui::{ListItem, ListItemSpacing, PopoverMenu, prelude::*}; @@ -28,7 +27,7 @@ impl OllamaModelPickerDelegate { on_model_changed: impl Fn(SharedString, &mut Window, &mut App) + 'static, cx: &mut Context, ) -> Self { - let mut models = Self::fetch_ollama_models(cx); + let mut models = edit_prediction::ollama::fetch_models(cx); let current_in_list = models.contains(&current_model); if !current_model.is_empty() && !current_in_list { @@ -58,28 +57,6 @@ impl OllamaModelPickerDelegate { on_model_changed: Arc::new(on_model_changed), } } - - fn fetch_ollama_models(cx: &mut App) -> Vec<SharedString> { - let ollama_provider_id = LanguageModelProviderId::new("ollama"); - - let Some(provider) = LanguageModelRegistry::read_global(cx).provider(&ollama_provider_id) - else { - return Vec::new(); - }; - - // Re-fetch models in case ollama has been started or updated since - // Zed was launched. - provider.authenticate(cx).detach_and_log_err(cx); - - let mut models: Vec<SharedString> = provider - .provided_models(cx) - .into_iter() - .map(|model| SharedString::from(model.id().0.to_string())) - .collect(); - - models.sort(); - models - } } impl PickerDelegate for OllamaModelPickerDelegate {