use anyhow::{Result, anyhow};
use fs::Fs;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use futures::{Stream, TryFutureExt, stream};
use gpui::{AnyView, App, AsyncApp, Context, CursorStyle, Entity, Task};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, LanguageModelRequestTool, LanguageModelToolChoice, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage, env_var,
};
use menu;
use ollama::{
    ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, OLLAMA_API_URL, OllamaFunctionCall,
    OllamaFunctionTool, OllamaToolCall, get_models, show_model, stream_chat_completion,
};
pub use settings::OllamaAvailableModel as AvailableModel;
use settings::{Settings, SettingsStore, update_settings_file};
use std::pin::Pin;
use std::sync::LazyLock;
use std::sync::atomic::{AtomicU64, Ordering};
use std::{collections::HashMap, sync::Arc};
use ui::{
    ButtonLike, ButtonLink, ConfiguredApiCard, ElevationIndex, InlineCode, List, ListBulletItem,
    Tooltip, prelude::*,
};
use ui_input::InputField;

use crate::AllLanguageModelSettings;

const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";
const OLLAMA_SITE: &str = "https://ollama.com/";

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("ollama");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("Ollama");

const API_KEY_ENV_VAR_NAME: &str = "OLLAMA_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

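/// Settings for the Ollama provider: the server URL plus any models declared
/// manually in the user's settings file.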
#[derive(Default, Debug, Clone, PartialEq)]
pub struct OllamaSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

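/// Registers Ollama as a language model provider, holding the shared HTTP
/// client and the [`State`] entity that tracks credentials and fetched models.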
pub struct OllamaLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

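/// Shared provider state: the API key (if any), the models fetched from the
/// Ollama server, and the in-flight fetch task.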
pub struct State {
    api_key_state: ApiKeyState,
    http_client: Arc<dyn HttpClient>,
    fetched_models: Vec<ollama::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.fetched_models.is_empty()
    }

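    /// Stores (or clears) the API key, then refetches the model list so the
    /// UI reflects the new credentials.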
    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self
            .api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx);

        self.fetched_models.clear();
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

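    /// Loads the API key if needed, then kicks off a model fetch to verify
    /// the connection.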
    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self
            .api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx);

        // Always try to fetch models: a local Ollama server needs no API key,
        // a remote server works once a key is provided, and a remote server
        // that requires a missing key fails gracefully.
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

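    /// Fetches the model list from the server, then queries each model's
    /// capabilities (tools, vision, thinking) with bounded concurrency.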
    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let http_client = Arc::clone(&self.http_client);
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let api_key = self.api_key_state.key(&api_url);

        // As a proxy for the server being "authenticated", check whether it's up by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, api_key.as_deref()).await?;

            let tasks = models
                .into_iter()
                // The Ollama API provides no metadata indicating which models
                // are embedding models, so simply filter out models with
                // "-embed" in their name.
                .filter(|model| !model.name.contains("-embed"))
                .map(|model| {
                    let http_client = Arc::clone(&http_client);
                    let api_url = api_url.clone();
                    let api_key = api_key.clone();
                    async move {
                        let name = model.name.as_str();
                        let model =
                            show_model(http_client.as_ref(), &api_url, api_key.as_deref(), name)
                                .await?;
                        let ollama_model = ollama::Model::new(
                            name,
                            None,
                            model.context_length,
                            Some(model.supports_tools()),
                            Some(model.supports_vision()),
                            Some(model.supports_thinking()),
                        );
                        Ok(ollama_model)
                    }
                });

            // Rate-limit the capability fetches, since an arbitrary number of
            // models may be available on the server.
            let mut ollama_models: Vec<_> = futures::stream::iter(tasks)
                .buffer_unordered(5)
                .collect::<Vec<Result<_>>>()
                .await
                .into_iter()
                .collect::<Result<Vec<_>>>()?;

            ollama_models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.fetched_models = ollama_models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }
}

impl OllamaLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                cx.observe_global::<SettingsStore>({
                    let mut last_settings = OllamaLanguageModelProvider::settings(cx).clone();
                    move |this: &mut State, cx| {
                        let current_settings = OllamaLanguageModelProvider::settings(cx);
                        let settings_changed = current_settings != &last_settings;
                        if settings_changed {
                            let url_changed = last_settings.api_url != current_settings.api_url;
                            last_settings = current_settings.clone();
                            if url_changed {
                                this.fetched_models.clear();
                                this.authenticate(cx).detach();
                            }
                            cx.notify();
                        }
                    }
                })
                .detach();

                State {
                    http_client,
                    fetched_models: Default::default(),
                    fetch_model_task: None,
                    api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
                }
            }),
        }
    }

    fn settings(cx: &App) -> &OllamaSettings {
        &AllLanguageModelSettings::get_global(cx).ollama
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            OLLAMA_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OllamaLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OllamaLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiOllama
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // Don't select a default model: doing so could trigger loading a model
        // that isn't resident yet, and on resource-constrained machines loading
        // something by default would be poor UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See explanation for default_model.
        None
    }

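    /// Merges models fetched from the server with models declared in settings:
    /// a settings entry whose base name matches a fetched model overrides its
    /// capabilities; unmatched entries are added as new models.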
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: HashMap<String, ollama::Model> = HashMap::new();

        // Add models from the Ollama API
        for model in self.state.read(cx).fetched_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for setting_model in &OllamaLanguageModelProvider::settings(cx).available_models {
            let setting_base = setting_model.name.split(':').next().unwrap();
            if let Some(model) = models
                .values_mut()
                .find(|m| m.name.split(':').next().unwrap() == setting_base)
            {
                model.max_tokens = setting_model.max_tokens;
                model.display_name = setting_model.display_name.clone();
                model.keep_alive = setting_model.keep_alive.clone();
                model.supports_tools = setting_model.supports_tools;
                model.supports_vision = setting_model.supports_images;
                model.supports_thinking = setting_model.supports_thinking;
            } else {
                models.insert(
                    setting_model.name.clone(),
                    ollama::Model {
                        name: setting_model.name.clone(),
                        display_name: setting_model.display_name.clone(),
                        max_tokens: setting_model.max_tokens,
                        keep_alive: setting_model.keep_alive.clone(),
                        supports_tools: setting_model.supports_tools,
                        supports_vision: setting_model.supports_images,
                        supports_thinking: setting_model.supports_thinking,
                    },
                );
            }
        }

        let mut models = models
            .into_values()
            .map(|model| {
                Arc::new(OllamaLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                    state: self.state.clone(),
                }) as Arc<dyn LanguageModel>
            })
            .collect::<Vec<_>>();
        models.sort_by_key(|model| model.name());
        models
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

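/// A single Ollama model exposed through the `LanguageModel` interface.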
pub struct OllamaLanguageModel {
    id: LanguageModelId,
    model: ollama::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
    state: Entity<State>,
}

impl OllamaLanguageModel {
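    /// Converts a generic `LanguageModelRequest` into Ollama's chat format,
    /// emitting tool results as `Tool` messages and attaching images only
    /// when the model supports vision.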
    fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
        let supports_vision = self.model.supports_vision.unwrap_or(false);

        let mut messages = Vec::with_capacity(request.messages.len());

        for mut msg in request.messages.into_iter() {
            let images = if supports_vision {
                msg.content
                    .iter()
                    .filter_map(|content| match content {
                        MessageContent::Image(image) => Some(image.source.to_string()),
                        _ => None,
                    })
                    .collect::<Vec<String>>()
            } else {
                vec![]
            };

            match msg.role {
                Role::User => {
                    for tool_result in msg
                        .content
                        .extract_if(.., |x| matches!(x, MessageContent::ToolResult(..)))
                    {
                        match tool_result {
                            MessageContent::ToolResult(tool_result) => {
                                messages.push(ChatMessage::Tool {
                                    tool_name: tool_result.tool_name.to_string(),
                                    content: tool_result.content.to_str().unwrap_or("").to_string(),
                                })
                            }
                            _ => unreachable!("only tool results are extracted here"),
                        }
                    }
                    if !msg.content.is_empty() {
                        messages.push(ChatMessage::User {
                            content: msg.string_contents(),
                            images: if images.is_empty() {
                                None
                            } else {
                                Some(images)
                            },
                        })
                    }
                }
                Role::Assistant => {
                    let content = msg.string_contents();
                    let mut thinking = None;
                    let mut tool_calls = Vec::new();
                    for content in msg.content.into_iter() {
                        match content {
                            MessageContent::Thinking { text, .. } if !text.is_empty() => {
                                thinking = Some(text)
                            }
                            MessageContent::ToolUse(tool_use) => {
                                tool_calls.push(OllamaToolCall {
                                    id: Some(tool_use.id.to_string()),
                                    function: OllamaFunctionCall {
                                        name: tool_use.name.to_string(),
                                        arguments: tool_use.input,
                                    },
                                });
                            }
                            _ => (),
                        }
                    }
                    messages.push(ChatMessage::Assistant {
                        content,
                        tool_calls: Some(tool_calls),
                        images: if images.is_empty() {
                            None
                        } else {
                            Some(images)
                        },
                        thinking,
                    })
                }
                Role::System => messages.push(ChatMessage::System {
                    content: msg.string_contents(),
                }),
            }
        }
        ChatRequest {
            model: self.model.name.clone(),
            messages,
            keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
            stream: true,
            options: Some(ChatOptions {
                num_ctx: Some(self.model.max_tokens),
                stop: Some(request.stop),
                temperature: request.temperature.or(Some(1.0)),
                ..Default::default()
            }),
            think: self
                .model
                .supports_thinking
                .map(|supports_thinking| supports_thinking && request.thinking_allowed),
            tools: if self.model.supports_tools.unwrap_or(false) {
                request.tools.into_iter().map(tool_into_ollama).collect()
            } else {
                vec![]
            },
        }
    }
}

impl LanguageModel for OllamaLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools.unwrap_or(false)
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision.unwrap_or(false)
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => false,
            LanguageModelToolChoice::Any => false,
            LanguageModelToolChoice::None => false,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("ollama/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // There is no endpoint for this _yet_ in Ollama
        // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
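        // Until then, estimate roughly four characters per token; e.g. a
        // 2,000-character conversation is counted as ~500 tokens.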
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().chars().count())
            .sum::<usize>()
            / 4;

        async move { Ok(token_count as u64) }.boxed()
    }

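    /// Streams a chat completion from the Ollama server through the request
    /// limiter, mapping response deltas into completion events.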
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let request = self.to_ollama_request(request);

        let http_client = self.http_client.clone();
        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OllamaLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped").into())).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let stream =
                stream_chat_completion(http_client.as_ref(), &api_url, api_key.as_deref(), request)
                    .await?;
            let stream = map_to_language_model_completion_events(stream);
            Ok(stream)
        });

        future.map_ok(|f| f.boxed()).boxed()
    }
}

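/// Translates Ollama's raw response deltas into completion events, emitting
/// tool-use, token-usage, and stop events as the stream unfolds.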
fn map_to_language_model_completion_events(
    stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Used for creating unique tool use ids
    static TOOL_CALL_COUNTER: AtomicU64 = AtomicU64::new(0);

    struct State {
        stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
        used_tools: bool,
    }

    // A single response from the original stream can yield several events
    // (e.g. a ToolUse followed by a Stop), so unfold into batches of events
    // and flatten them below.
    let stream = stream::unfold(
        State {
            stream,
            used_tools: false,
        },
        async move |mut state| {
            let response = state.stream.next().await?;

            let delta = match response {
                Ok(delta) => delta,
                Err(e) => {
                    let event = Err(LanguageModelCompletionError::from(anyhow!(e)));
                    return Some((vec![event], state));
                }
            };

            let mut events = Vec::new();

            match delta.message {
                ChatMessage::User { content, images: _ } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::System { content } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Tool { content, .. } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Assistant {
                    content,
                    tool_calls,
                    images: _,
                    thinking,
                } => {
                    if let Some(text) = thinking {
                        events.push(Ok(LanguageModelCompletionEvent::Thinking {
                            text,
                            signature: None,
                        }));
                    }

                    if let Some(tool_call) = tool_calls.and_then(|v| v.into_iter().next()) {
                        let OllamaToolCall { id, function } = tool_call;
                        let id = id.unwrap_or_else(|| {
                            format!(
                                "{}-{}",
                                &function.name,
                                TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed)
                            )
                        });
                        let event = LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                            id: LanguageModelToolUseId::from(id),
                            name: Arc::from(function.name),
                            raw_input: function.arguments.to_string(),
                            input: function.arguments,
                            is_input_complete: true,
                            thought_signature: None,
                        });
                        events.push(Ok(event));
                        state.used_tools = true;
                    } else if !content.is_empty() {
                        events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                    }
                }
            };

            if delta.done {
                events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                    input_tokens: delta.prompt_eval_count.unwrap_or(0),
                    output_tokens: delta.eval_count.unwrap_or(0),
                    cache_creation_input_tokens: 0,
                    cache_read_input_tokens: 0,
                })));
                if state.used_tools {
                    state.used_tools = false;
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                } else {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
            }

            Some((events, state))
        },
    );

    stream.flat_map(futures::stream::iter)
}

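/// Configuration UI for the Ollama provider: editors for the API key and API
/// URL, plus connection status and retry controls.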
struct ConfigurationView {
    api_key_editor: Entity<InputField>,
    api_url_editor: Entity<InputField>,
    state: Entity<State>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| InputField::new(window, cx, "63e02e...").label("API key"));

        let api_url_editor = cx.new(|cx| {
            let input = InputField::new(window, cx, OLLAMA_API_URL).label("API URL");
            input.set_text(OllamaLanguageModelProvider::api_url(cx), window, cx);
            input
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        Self {
            api_key_editor,
            api_url_editor,
            state,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // URL changes can cause the editor to be displayed again.
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))?
                .await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn save_api_url(&mut self, cx: &mut Context<Self>) {
        let api_url = self.api_url_editor.read(cx).text(cx).trim().to_string();
        let current_url = OllamaLanguageModelProvider::api_url(cx);
        if !api_url.is_empty() && &api_url != &current_url {
            let fs = <dyn Fs>::global(cx);
            update_settings_file(fs, cx, move |settings, _| {
                settings
                    .language_models
                    .get_or_insert_default()
                    .ollama
                    .get_or_insert_default()
                    .api_url = Some(api_url);
            });
        }
    }


    fn reset_api_url(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_url_editor
            .update(cx, |input, cx| input.set_text("", window, cx));
        let fs = <dyn Fs>::global(cx);
        update_settings_file(fs, cx, |settings, _cx| {
            if let Some(settings) = settings
                .language_models
                .as_mut()
                .and_then(|models| models.ollama.as_mut())
            {
                settings.api_url = Some(OLLAMA_API_URL.into());
            }
        });
        cx.notify();
    }

    fn render_instructions() -> Div {
        v_flex()
            .gap_2()
            .child(Label::new(
727 "Run LLMs locally on your machine with Ollama, or connect to an Ollama server. \
728 Can provide access to Llama, Mistral, Gemma, and hundreds of other models.",
729 ))
730 .child(Label::new("To use local Ollama:"))
731 .child(
732 List::new()
733 .child(
734 ListBulletItem::new("")
735 .child(Label::new("Download and install Ollama from"))
736 .child(ButtonLink::new("ollama.com", "https://ollama.com/download")),
737 )
738 .child(
739 ListBulletItem::new("")
740 .child(Label::new("Start Ollama and download a model:"))
741 .child(InlineCode::new("ollama run gpt-oss:20b")),
742 )
743 .child(ListBulletItem::new(
744 "Click 'Connect' below to start using Ollama in Zed",
745 )),
746 )
            .child(Label::new(
                "Alternatively, you can connect to an Ollama server by specifying its \
                 URL and, if the server requires one, an API key:",
            ))
    }

    fn render_api_key_editor(&self, cx: &Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let env_var_set = state.api_key_state.is_from_env_var();
        let configured_card_label = if env_var_set {
            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable.")
        } else {
            "API key configured".to_string()
        };

        if !state.api_key_state.has_key() {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(
                        format!("You can also assign the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed.")
                    )
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .into_any_element()
        } else {
            ConfiguredApiCard::new(configured_card_label)
                .disabled(env_var_set)
                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
                .when(env_var_set, |this| {
                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
                })
                .into_any_element()
        }
    }

    fn render_api_url_editor(&self, cx: &Context<Self>) -> Div {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let custom_api_url_set = api_url != OLLAMA_API_URL;

        if custom_api_url_set {
            h_flex()
                .p_3()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().elevated_surface_background)
                .child(
                    h_flex()
                        .gap_2()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(v_flex().gap_1().child(Label::new(api_url))),
                )
                .child(
                    Button::new("reset-api-url", "Reset API URL")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .on_click(
                            cx.listener(|this, _, window, cx| this.reset_api_url(window, cx)),
                        ),
                )
        } else {
            v_flex()
                .on_action(cx.listener(|this, _: &menu::Confirm, _window, cx| {
                    this.save_api_url(cx);
                    cx.notify();
                }))
                .gap_2()
                .child(self.api_url_editor.clone())
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        v_flex()
            .gap_2()
            .child(Self::render_instructions())
            .child(self.render_api_url_editor(cx))
            .child(self.render_api_key_editor(cx))
            .child(
                h_flex()
                    .w_full()
                    .justify_between()
                    .gap_2()
                    .child(
                        h_flex()
                            .w_full()
                            .gap_2()
                            .map(|this| {
                                if is_authenticated {
                                    this.child(
                                        Button::new("ollama-site", "Ollama")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| cx.open_url(OLLAMA_SITE))
                                            .into_any_element(),
                                    )
                                } else {
                                    this.child(
                                        Button::new("download_ollama_button", "Download Ollama")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| {
                                                cx.open_url(OLLAMA_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                    )
                                }
                            })
                            .child(
                                Button::new("view-models", "View All Models")
                                    .style(ButtonStyle::Subtle)
                                    .icon(IconName::ArrowUpRight)
                                    .icon_size(IconSize::XSmall)
                                    .icon_color(Color::Muted)
                                    .on_click(move |_, _, cx| cx.open_url(OLLAMA_LIBRARY_URL)),
                            ),
                    )
                    .map(|this| {
                        if is_authenticated {
                            this.child(
                                ButtonLike::new("connected")
                                    .disabled(true)
                                    .cursor_style(CursorStyle::Arrow)
                                    .child(
                                        h_flex()
                                            .gap_2()
                                            .child(Icon::new(IconName::Check).color(Color::Success))
                                            .child(Label::new("Connected"))
                                            .into_any_element(),
                                    )
                                    .child(
                                        IconButton::new("refresh-models", IconName::RotateCcw)
                                            .tooltip(Tooltip::text("Refresh Models"))
                                            .on_click(cx.listener(|this, _, _, cx| {
                                                this.state.update(cx, |state, _| {
                                                    state.fetched_models.clear();
                                                });
                                                this.retry_connection(cx);
                                            })),
                                    ),
                            )
                        } else {
                            this.child(
                                Button::new("retry_ollama_models", "Connect")
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::XSmall)
                                    .icon(IconName::PlayOutlined)
                                    .on_click(
                                        cx.listener(move |this, _, _, cx| {
                                            this.retry_connection(cx)
                                        }),
                                    ),
                            )
                        }
                    }),
            )
    }
}

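/// Converts a generic tool definition into Ollama's function-tool format.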
fn tool_into_ollama(tool: LanguageModelRequestTool) -> ollama::OllamaTool {
    ollama::OllamaTool::Function {
        function: OllamaFunctionTool {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
        },
    }
}