ollama.rs

  1use anyhow::{Result, anyhow};
  2use fs::Fs;
  3use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
  4use futures::{Stream, TryFutureExt, stream};
  5use gpui::{AnyView, App, AsyncApp, Context, Task};
  6use http_client::HttpClient;
  7use language_model::{
  8    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
  9    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
 10    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
 11    LanguageModelRequestTool, LanguageModelToolChoice, LanguageModelToolUse,
 12    LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
 13};
 14use menu;
 15use ollama::{
 16    ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, KeepAlive, OLLAMA_API_URL,
 17    OllamaFunctionCall, OllamaFunctionTool, OllamaToolCall, get_models, show_model,
 18    stream_chat_completion,
 19};
 20use schemars::JsonSchema;
 21use serde::{Deserialize, Serialize};
 22use settings::{Settings, SettingsStore, update_settings_file};
 23use std::pin::Pin;
 24use std::sync::LazyLock;
 25use std::sync::atomic::{AtomicU64, Ordering};
 26use std::{collections::HashMap, sync::Arc};
 27use ui::{ButtonLike, ElevationIndex, List, Tooltip, prelude::*};
 28use ui_input::SingleLineInput;
 29use zed_env_vars::{EnvVar, env_var};
 30
 31use crate::AllLanguageModelSettings;
 32use crate::api_key::ApiKeyState;
 33use crate::ui::InstructionListItem;
 34
// Links surfaced in the configuration UI / instructions.
const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";
const OLLAMA_SITE: &str = "https://ollama.com/";

// Stable identifiers for this provider within the language-model registry.
const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("ollama");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("Ollama");

// Environment variable that can supply the API key (loaded in `State::authenticate`).
const API_KEY_ENV_VAR_NAME: &str = "OLLAMA_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
 44
/// Resolved Ollama section of Zed's language-model settings.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct OllamaSettings {
    /// Base URL of the Ollama HTTP API; when empty, `OLLAMA_API_URL` is used
    /// (see `OllamaLanguageModelProvider::api_url`).
    pub api_url: String,
    /// Models declared manually in settings; these override server-fetched
    /// models with the same name (see `provided_models`).
    pub available_models: Vec<AvailableModel>,
}
 50
/// A model declared explicitly in settings rather than discovered from the
/// Ollama server. Capability flags are optional because they may be unknown.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The model name in the Ollama API (e.g. "llama3.2:latest")
    pub name: String,
    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
    pub display_name: Option<String>,
    /// The Context Length parameter to the model (aka num_ctx or n_ctx)
    pub max_tokens: u64,
    /// The number of seconds to keep the connection open after the last request
    pub keep_alive: Option<KeepAlive>,
    /// Whether the model supports tools
    pub supports_tools: Option<bool>,
    /// Whether the model supports vision
    pub supports_images: Option<bool>,
    /// Whether to enable think mode
    pub supports_thinking: Option<bool>,
}
 68
/// Language-model provider backed by an Ollama server (local or remote).
pub struct OllamaLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    /// Shared, observable state: API key and the fetched model list.
    state: gpui::Entity<State>,
}
 73
/// Mutable provider state, held in a GPUI entity so the UI can observe it.
pub struct State {
    /// Per-API-URL key, optionally sourced from the `OLLAMA_API_KEY` env var.
    api_key_state: ApiKeyState,
    http_client: Arc<dyn HttpClient>,
    /// Models discovered from the server, kept sorted by name.
    fetched_models: Vec<ollama::Model>,
    /// Holds the most recent model-fetch task; replaced on each restart.
    fetch_model_task: Option<Task<Result<()>>>,
}
 80
impl State {
    /// "Authenticated" is proxied by a non-empty model list: a reachable
    /// server (with valid credentials, if it requires any) returns models.
    fn is_authenticated(&self) -> bool {
        !self.fetched_models.is_empty()
    }

    /// Stores (or clears, when `api_key` is `None`) the key for the current
    /// API URL, then restarts the model fetch so the list reflects the new
    /// credentials. The fetch restart happens even if storing the key failed;
    /// the store result is still returned to the caller.
    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self
            .api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx);

        self.fetched_models.clear();
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

    /// Loads the API key for the current URL if needed (e.g. from the
    /// `OLLAMA_API_KEY` env var), then restarts the model fetch regardless
    /// of whether a key was obtained.
    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self.api_key_state.load_if_needed(
            api_url,
            &API_KEY_ENV_VAR,
            |this| &mut this.api_key_state,
            cx,
        );

        // Always try to fetch models - if no API key is needed (local Ollama), it will work
        // If API key is needed and provided, it will work
        // If API key is needed and not provided, it will fail gracefully
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

    /// Fetches the server's model list, then each model's capabilities, and
    /// stores the result (sorted by name) into `fetched_models`.
    ///
    /// NOTE(review): the final `collect::<Result<Vec<_>>>()?` means a single
    /// failed capability lookup aborts the whole fetch.
    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let http_client = Arc::clone(&self.http_client);
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let api_key = self.api_key_state.key(&api_url);

        // As a proxy for the server being "authenticated", we'll check if its up by fetching the models
        cx.spawn(async move |this, cx| {
            let models =
                get_models(http_client.as_ref(), &api_url, api_key.as_deref(), None).await?;

            let tasks = models
                .into_iter()
                // Since there is no metadata from the Ollama API
                // indicating which models are embedding models,
                // simply filter out models with "-embed" in their name
                .filter(|model| !model.name.contains("-embed"))
                .map(|model| {
                    let http_client = Arc::clone(&http_client);
                    let api_url = api_url.clone();
                    let api_key = api_key.clone();
                    async move {
                        // One `show_model` round-trip per model to learn its
                        // tool/vision/thinking capabilities.
                        let name = model.name.as_str();
                        let capabilities =
                            show_model(http_client.as_ref(), &api_url, api_key.as_deref(), name)
                                .await?;
                        let ollama_model = ollama::Model::new(
                            name,
                            None,
                            None,
                            Some(capabilities.supports_tools()),
                            Some(capabilities.supports_vision()),
                            Some(capabilities.supports_thinking()),
                        );
                        Ok(ollama_model)
                    }
                });

            // Rate-limit capability fetches
            // since there is an arbitrary number of models available
            let mut ollama_models: Vec<_> = futures::stream::iter(tasks)
                .buffer_unordered(5)
                .collect::<Vec<Result<_>>>()
                .await
                .into_iter()
                .collect::<Result<Vec<_>>>()?;

            ollama_models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.fetched_models = ollama_models;
                cx.notify();
            })
        })
    }

    /// Starts a fresh fetch and stores its task, replacing any previous one.
    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }
}
181
182impl OllamaLanguageModelProvider {
183    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
184        let this = Self {
185            http_client: http_client.clone(),
186            state: cx.new(|cx| {
187                cx.observe_global::<SettingsStore>({
188                    let mut last_settings = OllamaLanguageModelProvider::settings(cx).clone();
189                    move |this: &mut State, cx| {
190                        let current_settings = OllamaLanguageModelProvider::settings(cx);
191                        let settings_changed = current_settings != &last_settings;
192                        if settings_changed {
193                            let url_changed = last_settings.api_url != current_settings.api_url;
194                            last_settings = current_settings.clone();
195                            if url_changed {
196                                this.fetched_models.clear();
197                                this.authenticate(cx).detach();
198                            }
199                            cx.notify();
200                        }
201                    }
202                })
203                .detach();
204
205                State {
206                    http_client,
207                    fetched_models: Default::default(),
208                    fetch_model_task: None,
209                    api_key_state: ApiKeyState::new(Self::api_url(cx)),
210                }
211            }),
212        };
213        this
214    }
215
216    fn settings(cx: &App) -> &OllamaSettings {
217        &AllLanguageModelSettings::get_global(cx).ollama
218    }
219
220    fn api_url(cx: &App) -> SharedString {
221        let api_url = &Self::settings(cx).api_url;
222        if api_url.is_empty() {
223            OLLAMA_API_URL.into()
224        } else {
225            SharedString::new(api_url.as_str())
226        }
227    }
228}
229
impl LanguageModelProviderState for OllamaLanguageModelProvider {
    type ObservableEntity = State;

    /// Exposes the state entity so callers can observe key/model changes.
    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}
237
impl LanguageModelProvider for OllamaLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiOllama
    }

    /// Deliberately no default model for Ollama.
    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We shouldn't try to select default model, because it might lead to a load call for an unloaded model.
        // In a constrained environment where user might not have enough resources it'll be a bad UX to select something
        // to load by default.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See explanation for default_model.
        None
    }

    /// Union of server-fetched models and models declared in settings.
    /// Settings entries win on name collisions (inserted second into the
    /// map); the result is sorted by display name.
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: HashMap<String, ollama::Model> = HashMap::new();

        // Add models from the Ollama API
        for model in self.state.read(cx).fetched_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in &OllamaLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                ollama::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    keep_alive: model.keep_alive.clone(),
                    supports_tools: model.supports_tools,
                    supports_vision: model.supports_images,
                    supports_thinking: model.supports_thinking,
                },
            );
        }

        let mut models = models
            .into_values()
            .map(|model| {
                Arc::new(OllamaLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    // Cap concurrent requests per model.
                    request_limiter: RateLimiter::new(4),
                    state: self.state.clone(),
                }) as Arc<dyn LanguageModel>
            })
            .collect::<Vec<_>>();
        models.sort_by_key(|model| model.name());
        models
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, window, cx))
            .into()
    }

    /// Clearing credentials is implemented as storing `None` for the key.
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}
327
/// A single Ollama model exposed through the `LanguageModel` trait.
pub struct OllamaLanguageModel {
    id: LanguageModelId,
    model: ollama::Model,
    http_client: Arc<dyn HttpClient>,
    /// Bounds concurrent completion requests for this model.
    request_limiter: RateLimiter,
    /// Shared provider state, read for the API key/URL per request.
    state: gpui::Entity<State>,
}
335
impl OllamaLanguageModel {
    /// Converts a Zed [`LanguageModelRequest`] into an Ollama [`ChatRequest`].
    ///
    /// Mapping notes:
    /// - Images are forwarded only when the model reports vision support.
    /// - Tool results inside a user message are extracted first and emitted
    ///   as separate `Tool` messages, before any remaining user text.
    /// - For assistant messages, tool uses become Ollama tool calls; if
    ///   multiple `Thinking` parts exist, the last non-empty one wins.
    /// - Tools are attached only when the model reports tool support, and
    ///   `think` is only set when the model declares thinking support.
    fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
        let supports_vision = self.model.supports_vision.unwrap_or(false);

        let mut messages = Vec::with_capacity(request.messages.len());

        for mut msg in request.messages.into_iter() {
            // Collect image payloads up front; empty when vision is off.
            let images = if supports_vision {
                msg.content
                    .iter()
                    .filter_map(|content| match content {
                        MessageContent::Image(image) => Some(image.source.to_string()),
                        _ => None,
                    })
                    .collect::<Vec<String>>()
            } else {
                vec![]
            };

            match msg.role {
                Role::User => {
                    // Pull tool results out of the message so they can be sent
                    // as dedicated Tool messages.
                    for tool_result in msg
                        .content
                        .extract_if(.., |x| matches!(x, MessageContent::ToolResult(..)))
                    {
                        match tool_result {
                            MessageContent::ToolResult(tool_result) => {
                                messages.push(ChatMessage::Tool {
                                    tool_name: tool_result.tool_name.to_string(),
                                    content: tool_result.content.to_str().unwrap_or("").to_string(),
                                })
                            }
                            _ => unreachable!("Only tool result should be extracted"),
                        }
                    }
                    // Whatever content remains becomes a regular user message.
                    if !msg.content.is_empty() {
                        messages.push(ChatMessage::User {
                            content: msg.string_contents(),
                            images: if images.is_empty() {
                                None
                            } else {
                                Some(images)
                            },
                        })
                    }
                }
                Role::Assistant => {
                    // Capture the text before `msg.content` is consumed below.
                    let content = msg.string_contents();
                    let mut thinking = None;
                    let mut tool_calls = Vec::new();
                    for content in msg.content.into_iter() {
                        match content {
                            MessageContent::Thinking { text, .. } if !text.is_empty() => {
                                thinking = Some(text)
                            }
                            MessageContent::ToolUse(tool_use) => {
                                tool_calls.push(OllamaToolCall::Function(OllamaFunctionCall {
                                    name: tool_use.name.to_string(),
                                    arguments: tool_use.input,
                                }));
                            }
                            _ => (),
                        }
                    }
                    messages.push(ChatMessage::Assistant {
                        content,
                        tool_calls: Some(tool_calls),
                        images: if images.is_empty() {
                            None
                        } else {
                            Some(images)
                        },
                        thinking,
                    })
                }
                Role::System => messages.push(ChatMessage::System {
                    content: msg.string_contents(),
                }),
            }
        }
        ChatRequest {
            model: self.model.name.clone(),
            messages,
            keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
            stream: true,
            options: Some(ChatOptions {
                num_ctx: Some(self.model.max_tokens),
                stop: Some(request.stop),
                // Default temperature of 1.0 when the request leaves it unset.
                temperature: request.temperature.or(Some(1.0)),
                ..Default::default()
            }),
            think: self
                .model
                .supports_thinking
                .map(|supports_thinking| supports_thinking && request.thinking_allowed),
            tools: if self.model.supports_tools.unwrap_or(false) {
                request.tools.into_iter().map(tool_into_ollama).collect()
            } else {
                vec![]
            },
        }
    }
}
439
impl LanguageModel for OllamaLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    /// Capability flags default to `false` when the server/settings did not
    /// report them.
    fn supports_tools(&self) -> bool {
        self.model.supports_tools.unwrap_or(false)
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision.unwrap_or(false)
    }

    /// No tool-choice modes are supported; the match stays exhaustive so a
    /// new variant forces a decision here.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => false,
            LanguageModelToolChoice::Any => false,
            LanguageModelToolChoice::None => false,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("ollama/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    /// Rough estimate: total characters / 4, since Ollama has no tokenize
    /// endpoint yet.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // There is no endpoint for this _yet_ in Ollama
        // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().chars().count())
            .sum::<usize>()
            / 4;

        async move { Ok(token_count as u64) }.boxed()
    }

    /// Streams a chat completion: converts the request, reads the current
    /// API key/URL from shared state, and maps Ollama deltas into
    /// [`LanguageModelCompletionEvent`]s, throttled by `request_limiter`.
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let request = self.to_ollama_request(request);

        let http_client = self.http_client.clone();
        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OllamaLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            // The app (and thus our state entity) is gone; fail immediately.
            return futures::future::ready(Err(anyhow!("App state dropped").into())).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let stream =
                stream_chat_completion(http_client.as_ref(), &api_url, api_key.as_deref(), request)
                    .await?;
            let stream = map_to_language_model_completion_events(stream);
            Ok(stream)
        });

        future.map_ok(|f| f.boxed()).boxed()
    }
}
530
/// Adapts a stream of Ollama [`ChatResponseDelta`]s into Zed completion
/// events. One delta may expand into several events (thinking, tool use,
/// text, usage, stop), hence the `unfold` + `flat_map` shape.
///
/// NOTE(review): only the first tool call of a delta is emitted, and when a
/// tool call is present any accompanying `content` text is not emitted.
fn map_to_language_model_completion_events(
    stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Used for creating unique tool use ids
    static TOOL_CALL_COUNTER: AtomicU64 = AtomicU64::new(0);

    // Carried across unfold steps: the source stream plus whether a tool was
    // used, which decides the final stop reason.
    struct State {
        stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
        used_tools: bool,
    }

    // We need to create a ToolUse and Stop event from a single
    // response from the original stream
    let stream = stream::unfold(
        State {
            stream,
            used_tools: false,
        },
        async move |mut state| {
            // `None` from the inner stream ends the unfolded stream too.
            let response = state.stream.next().await?;

            let delta = match response {
                Ok(delta) => delta,
                Err(e) => {
                    // Surface the error as a single-event batch and keep going.
                    let event = Err(LanguageModelCompletionError::from(anyhow!(e)));
                    return Some((vec![event], state));
                }
            };

            let mut events = Vec::new();

            match delta.message {
                ChatMessage::User { content, images: _ } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::System { content } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Tool { content, .. } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Assistant {
                    content,
                    tool_calls,
                    images: _,
                    thinking,
                } => {
                    if let Some(text) = thinking {
                        events.push(Ok(LanguageModelCompletionEvent::Thinking {
                            text,
                            signature: None,
                        }));
                    }

                    if let Some(tool_call) = tool_calls.and_then(|v| v.into_iter().next()) {
                        match tool_call {
                            OllamaToolCall::Function(function) => {
                                // Ollama doesn't give tool calls ids, so build
                                // one from the name + a global counter.
                                let tool_id = format!(
                                    "{}-{}",
                                    &function.name,
                                    TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed)
                                );
                                let event =
                                    LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                                        id: LanguageModelToolUseId::from(tool_id),
                                        name: Arc::from(function.name),
                                        raw_input: function.arguments.to_string(),
                                        input: function.arguments,
                                        is_input_complete: true,
                                    });
                                events.push(Ok(event));
                                state.used_tools = true;
                            }
                        }
                    } else if !content.is_empty() {
                        events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                    }
                }
            };

            // The final delta carries token usage and decides the stop reason.
            if delta.done {
                events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                    input_tokens: delta.prompt_eval_count.unwrap_or(0),
                    output_tokens: delta.eval_count.unwrap_or(0),
                    cache_creation_input_tokens: 0,
                    cache_read_input_tokens: 0,
                })));
                if state.used_tools {
                    state.used_tools = false;
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                } else {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
            }

            Some((events, state))
        },
    );

    // Flatten the per-delta event batches into a single event stream.
    stream.flat_map(futures::stream::iter)
}
632
/// Settings-panel view for configuring the Ollama API key and URL.
struct ConfigurationView {
    api_key_editor: gpui::Entity<SingleLineInput>,
    api_url_editor: gpui::Entity<SingleLineInput>,
    /// Shared provider state; observed so the view re-renders on changes.
    state: gpui::Entity<State>,
}
638
639impl ConfigurationView {
    /// Builds the view, pre-filling the URL editor with the effective API URL
    /// and re-rendering whenever the provider state changes.
    pub fn new(state: gpui::Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor =
            cx.new(|cx| SingleLineInput::new(window, cx, "63e02e...").label("API key"));

        let api_url_editor = cx.new(|cx| {
            let input = SingleLineInput::new(window, cx, OLLAMA_API_URL).label("API URL");
            input.set_text(OllamaLanguageModelProvider::api_url(cx), window, cx);
            input
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        Self {
            api_key_editor,
            api_url_editor,
            state,
        }
    }
661
    /// Re-runs the model fetch (used by the "Connect"/retry action).
    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
    }
666
    /// Confirm-action handler: stores the trimmed editor text as the API key.
    /// Empty input is ignored; the editor is cleared before the async store.
    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // url changes can cause the editor to be displayed again
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
                .await
        })
        .detach_and_log_err(cx);
    }
685
    /// Clears the stored API key (and the editor) for the current API URL.
    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))?
                .await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }
700
701    fn save_api_url(&mut self, cx: &mut Context<Self>) {
702        let api_url = self.api_url_editor.read(cx).text(cx).trim().to_string();
703        let current_url = OllamaLanguageModelProvider::api_url(cx);
704        if !api_url.is_empty() && &api_url != &current_url {
705            let fs = <dyn Fs>::global(cx);
706            update_settings_file::<AllLanguageModelSettings>(fs, cx, move |settings, _| {
707                if let Some(settings) = settings.ollama.as_mut() {
708                    settings.api_url = Some(api_url);
709                } else {
710                    settings.ollama = Some(crate::settings::OllamaSettingsContent {
711                        api_url: Some(api_url),
712                        available_models: None,
713                    });
714                }
715            });
716        }
717    }
718
    /// Clears the URL editor and writes the default `OLLAMA_API_URL` back to
    /// settings (only when an Ollama settings section already exists).
    fn reset_api_url(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_url_editor
            .update(cx, |input, cx| input.set_text("", window, cx));
        let fs = <dyn Fs>::global(cx);
        update_settings_file::<AllLanguageModelSettings>(fs, cx, |settings, _cx| {
            if let Some(settings) = settings.ollama.as_mut() {
                settings.api_url = Some(OLLAMA_API_URL.into());
            }
        });
        cx.notify();
    }
730
    /// Static instructional copy shown above the key/URL editors.
    fn render_instructions() -> Div {
        v_flex()
            .gap_2()
            .child(Label::new(
                "Run LLMs locally on your machine with Ollama, or connect to an Ollama server. \
                Can provide access to Llama, Mistral, Gemma, and hundreds of other models.",
            ))
            .child(Label::new("To use local Ollama:"))
            .child(
                List::new()
                    .child(InstructionListItem::new(
                        "Download and install Ollama from",
                        Some("ollama.com"),
                        Some("https://ollama.com/download"),
                    ))
                    .child(InstructionListItem::text_only(
                        "Start Ollama and download a model: `ollama run gpt-oss:20b`",
                    ))
                    .child(InstructionListItem::text_only(
                        "Click 'Connect' below to start using Ollama in Zed",
                    )),
            )
            .child(Label::new(
                "Alternatively, you can connect to an Ollama server by specifying its \
                URL and API key (may not be required):",
            ))
    }
758
    /// Renders either the key-entry editor (no key yet) or a "key configured"
    /// banner with a reset button. The reset button gets an explanatory
    /// tooltip when the key came from the environment variable.
    fn render_api_key_editor(&self, cx: &Context<Self>) -> Div {
        let state = self.state.read(cx);
        let env_var_set = state.api_key_state.is_from_env_var();

        if !state.api_key_state.has_key() {
            v_flex()
              .on_action(cx.listener(Self::save_api_key))
              .child(self.api_key_editor.clone())
              .child(
                  Label::new(
                      format!("You can also assign the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed.")
                  )
                  .size(LabelSize::Small)
                  .color(Color::Muted),
              )
        } else {
            h_flex()
                .p_3()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().elevated_surface_background)
                .child(
                    h_flex()
                        .gap_2()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(
                            Label::new(
                                if env_var_set {
                                    format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable.")
                                } else {
                                    "API key configured".to_string()
                                }
                            )
                        )
                )
                .child(
                    Button::new("reset-api-key", "Reset API Key")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .when(env_var_set, |this| {
                            this.tooltip(Tooltip::text(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable.")))
                        })
                        .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx))),
                )
        }
    }
810
811    fn render_api_url_editor(&self, cx: &Context<Self>) -> Div {
812        let api_url = OllamaLanguageModelProvider::api_url(cx);
813        let custom_api_url_set = api_url != OLLAMA_API_URL;
814
815        if custom_api_url_set {
816            h_flex()
817                .p_3()
818                .justify_between()
819                .rounded_md()
820                .border_1()
821                .border_color(cx.theme().colors().border)
822                .bg(cx.theme().colors().elevated_surface_background)
823                .child(
824                    h_flex()
825                        .gap_2()
826                        .child(Icon::new(IconName::Check).color(Color::Success))
827                        .child(v_flex().gap_1().child(Label::new(api_url))),
828                )
829                .child(
830                    Button::new("reset-api-url", "Reset API URL")
831                        .label_size(LabelSize::Small)
832                        .icon(IconName::Undo)
833                        .icon_size(IconSize::Small)
834                        .icon_position(IconPosition::Start)
835                        .layer(ElevationIndex::ModalSurface)
836                        .on_click(
837                            cx.listener(|this, _, window, cx| this.reset_api_url(window, cx)),
838                        ),
839                )
840        } else {
841            v_flex()
842                .on_action(cx.listener(|this, _: &menu::Confirm, _window, cx| {
843                    this.save_api_url(cx);
844                    cx.notify();
845                }))
846                .gap_2()
847                .child(self.api_url_editor.clone())
848        }
849    }
850}
851
852impl Render for ConfigurationView {
853    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
854        let is_authenticated = self.state.read(cx).is_authenticated();
855
856        v_flex()
857            .gap_2()
858            .child(Self::render_instructions())
859            .child(self.render_api_url_editor(cx))
860            .child(self.render_api_key_editor(cx))
861            .child(
862                h_flex()
863                    .w_full()
864                    .justify_between()
865                    .gap_2()
866                    .child(
867                        h_flex()
868                            .w_full()
869                            .gap_2()
870                            .map(|this| {
871                                if is_authenticated {
872                                    this.child(
873                                        Button::new("ollama-site", "Ollama")
874                                            .style(ButtonStyle::Subtle)
875                                            .icon(IconName::ArrowUpRight)
876                                            .icon_size(IconSize::XSmall)
877                                            .icon_color(Color::Muted)
878                                            .on_click(move |_, _, cx| cx.open_url(OLLAMA_SITE))
879                                            .into_any_element(),
880                                    )
881                                } else {
882                                    this.child(
883                                        Button::new("download_ollama_button", "Download Ollama")
884                                            .style(ButtonStyle::Subtle)
885                                            .icon(IconName::ArrowUpRight)
886                                            .icon_size(IconSize::XSmall)
887                                            .icon_color(Color::Muted)
888                                            .on_click(move |_, _, cx| {
889                                                cx.open_url(OLLAMA_DOWNLOAD_URL)
890                                            })
891                                            .into_any_element(),
892                                    )
893                                }
894                            })
895                            .child(
896                                Button::new("view-models", "View All Models")
897                                    .style(ButtonStyle::Subtle)
898                                    .icon(IconName::ArrowUpRight)
899                                    .icon_size(IconSize::XSmall)
900                                    .icon_color(Color::Muted)
901                                    .on_click(move |_, _, cx| cx.open_url(OLLAMA_LIBRARY_URL)),
902                            ),
903                    )
904                    .map(|this| {
905                        if is_authenticated {
906                            this.child(
907                                ButtonLike::new("connected")
908                                    .disabled(true)
909                                    .cursor_style(gpui::CursorStyle::Arrow)
910                                    .child(
911                                        h_flex()
912                                            .gap_2()
913                                            .child(Icon::new(IconName::Check).color(Color::Success))
914                                            .child(Label::new("Connected"))
915                                            .into_any_element(),
916                                    ),
917                            )
918                        } else {
919                            this.child(
920                                Button::new("retry_ollama_models", "Connect")
921                                    .icon_position(IconPosition::Start)
922                                    .icon_size(IconSize::XSmall)
923                                    .icon(IconName::PlayOutlined)
924                                    .on_click(
925                                        cx.listener(move |this, _, _, cx| {
926                                            this.retry_connection(cx)
927                                        }),
928                                    ),
929                            )
930                        }
931                    }),
932            )
933    }
934}
935
936fn tool_into_ollama(tool: LanguageModelRequestTool) -> ollama::OllamaTool {
937    ollama::OllamaTool::Function {
938        function: OllamaFunctionTool {
939            name: tool.name,
940            description: Some(tool.description),
941            parameters: Some(tool.input_schema),
942        },
943    }
944}