Detailed changes
@@ -92,9 +92,9 @@ impl Render for EditPredictionButton {
return div().hidden();
}
- let all_language_settings = all_language_settings(None, cx);
+ let language_settings = all_language_settings(None, cx);
- match all_language_settings.edit_predictions.provider {
+ match language_settings.edit_predictions.provider {
EditPredictionProvider::Copilot => {
let Some(copilot) = Copilot::global(cx) else {
return div().hidden();
@@ -298,8 +298,6 @@ impl Render for EditPredictionButton {
let enabled = self.editor_enabled.unwrap_or(true);
let this = cx.weak_entity();
- let tooltip_meta = "Powered by Ollama";
-
div().child(
PopoverMenu::new("ollama")
.menu(move |window, cx| {
@@ -323,6 +321,22 @@ impl Render for EditPredictionButton {
))
}),
move |_window, cx| {
+ let settings = all_language_settings(None, cx);
+ let tooltip_meta = match settings
+ .edit_predictions
+ .ollama
+ .model
+ .as_deref()
+ {
+ Some(model) if !model.trim().is_empty() => {
+ format!("Powered by Ollama ({model})")
+ }
+ _ => {
+ "Ollama model not configured; configure a model before use"
+ .to_string()
+ }
+ };
+
Tooltip::with_meta(
"Edit Prediction",
Some(&ToggleMenu),
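
For reference, here is the tooltip fallback above in isolation, with a plain Option<&str> standing in for the real settings lookup (the free-function shape is illustrative only, not the PR's code):

fn tooltip_meta(configured_model: Option<&str>) -> String {
    match configured_model {
        // A whitespace-only model name counts as unset.
        Some(model) if !model.trim().is_empty() => {
            format!("Powered by Ollama ({model})")
        }
        _ => "Ollama model not configured; configure a model before use".to_string(),
    }
}

fn main() {
    assert_eq!(
        tooltip_meta(Some("qwen2.5-coder:7b")),
        "Powered by Ollama (qwen2.5-coder:7b)"
    );
    assert!(tooltip_meta(Some("   ")).starts_with("Ollama model not configured"));
}
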
@@ -2,7 +2,24 @@ mod ollama_edit_prediction_delegate;
pub use ollama_edit_prediction_delegate::OllamaEditPredictionDelegate;
-use anyhow::{Context as _, Result};
+use anyhow::{Context, Result};
+
+pub const RECOMMENDED_EDIT_PREDICTION_MODELS: [&str; 4] = [
+ "qwen2.5-coder:3b-base",
+ "qwen2.5-coder:7b-base",
+ "qwen2.5-coder:3b",
+ "qwen2.5-coder:7b",
+];
+
+pub fn pick_recommended_edit_prediction_model<'a>(
+ available_models: impl IntoIterator<Item = &'a str>,
+) -> Option<&'static str> {
+ let available: std::collections::HashSet<&str> = available_models.into_iter().collect();
+
+ RECOMMENDED_EDIT_PREDICTION_MODELS
+ .into_iter()
+ .find(|recommended| available.contains(recommended))
+}
use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Request as HttpRequest};
use serde::{Deserialize, Serialize};
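
Note that the helper honors the declaration order of RECOMMENDED_EDIT_PREDICTION_MODELS, not the order in which the Ollama API happens to list local models. A self-contained sketch of that behavior, restating the helper (with the HashSet import hoisted) so it compiles on its own:

use std::collections::HashSet;

const RECOMMENDED_EDIT_PREDICTION_MODELS: [&str; 4] = [
    "qwen2.5-coder:3b-base",
    "qwen2.5-coder:7b-base",
    "qwen2.5-coder:3b",
    "qwen2.5-coder:7b",
];

fn pick_recommended_edit_prediction_model<'a>(
    available_models: impl IntoIterator<Item = &'a str>,
) -> Option<&'static str> {
    let available: HashSet<&str> = available_models.into_iter().collect();
    RECOMMENDED_EDIT_PREDICTION_MODELS
        .into_iter()
        .find(|recommended| available.contains(recommended))
}

fn main() {
    // qwen2.5-coder:7b is listed first locally, but the 3b base model
    // wins because it comes first in the recommended list.
    let local = ["qwen2.5-coder:7b", "qwen2.5-coder:3b-base", "llama3:8b"];
    assert_eq!(
        pick_recommended_edit_prediction_model(local),
        Some("qwen2.5-coder:3b-base")
    );
    // No recommended model pulled locally: the caller skips edit prediction.
    assert_eq!(pick_recommended_edit_prediction_model(["llama3:8b"]), None);
}
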
@@ -16,7 +16,7 @@ use std::{
};
use text::ToOffset;
-use crate::OLLAMA_API_URL;
+use crate::{OLLAMA_API_URL, get_models, pick_recommended_edit_prediction_model};
pub const DEBOUNCE_TIMEOUT: Duration = Duration::from_millis(150);
@@ -166,12 +166,7 @@ impl EditPredictionDelegate for OllamaEditPredictionDelegate {
let http_client = self.http_client.clone();
let settings = all_language_settings(None, cx);
- let model = settings
- .edit_predictions
- .ollama
- .model
- .clone()
- .unwrap_or_else(|| "qwen2.5-coder:3b-base".to_string());
+ let configured_model = settings.edit_predictions.ollama.model.clone();
let api_url = settings
.edit_predictions
.ollama
@@ -185,6 +180,31 @@ impl EditPredictionDelegate for OllamaEditPredictionDelegate {
cx.background_executor().timer(DEBOUNCE_TIMEOUT).await;
}
+ let model = if let Some(model) = configured_model
+ .as_deref()
+ .map(str::trim)
+ .filter(|model| !model.is_empty())
+ {
+ model.to_string()
+ } else {
+ let local_models = get_models(http_client.as_ref(), &api_url, None).await?;
+ let available_model_names = local_models.iter().map(|model| model.name.as_str());
+
+ match pick_recommended_edit_prediction_model(available_model_names) {
+ Some(recommended) => recommended.to_string(),
+ None => {
+ log::debug!(
+ "Ollama: No model configured and no recommended local model found; skipping edit prediction"
+ );
+ this.update(cx, |this, cx| {
+ this.pending_request = None;
+ cx.notify();
+ })?;
+ return Ok(());
+ }
+ }
+ };
+
let cursor_offset = cursor_position.to_offset(&snapshot);
let cursor_point = cursor_offset.to_point(&snapshot);
let excerpt = EditPredictionExcerpt::select_from_buffer(
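
A blank or whitespace-only setting is treated as unset, and the trimmed value is what gets used. The same Option pipeline in isolation (the wrapper name configured_model_or_none is hypothetical):

fn configured_model_or_none(configured: Option<String>) -> Option<String> {
    configured
        .as_deref()
        .map(str::trim)
        .filter(|model| !model.is_empty())
        .map(str::to_string)
}

fn main() {
    assert_eq!(configured_model_or_none(None), None);
    assert_eq!(configured_model_or_none(Some("   ".to_string())), None);
    assert_eq!(
        configured_model_or_none(Some(" qwen2.5-coder:7b ".to_string())),
        Some("qwen2.5-coder:7b".to_string())
    );
}
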
@@ -214,7 +214,7 @@ pub struct CodestralSettingsContent {
pub struct OllamaEditPredictionSettingsContent {
/// Model to use for completions.
///
- /// Default: "qwen2.5-coder:1.5b"
+ /// Default: none (a recommended local model is auto-selected when available)
pub model: Option<String>,
/// Api URL to use for completions.
///
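
With the hardcoded default gone, an omitted model key deserializes to None and takes the auto-selection path above. A minimal sketch, assuming the usual serde derive (only the model field from the struct is restated; serde_json is used just for demonstration):

use serde::Deserialize;

#[derive(Deserialize)]
struct OllamaEditPredictionSettingsContent {
    model: Option<String>,
}

fn main() {
    // An empty settings object leaves `model` unset...
    let settings: OllamaEditPredictionSettingsContent =
        serde_json::from_str("{}").unwrap();
    // ...which now routes through the recommended-model fallback
    // instead of a hardcoded model name.
    assert!(settings.model.is_none());
}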