use anyhow::{Result, anyhow};
use fs::Fs;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use futures::{Stream, TryFutureExt, stream};
use gpui::{AnyView, App, AsyncApp, Context, CursorStyle, Entity, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestTool, LanguageModelToolChoice, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use menu;
use ollama::{
    ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, OLLAMA_API_URL, OllamaFunctionCall,
    OllamaFunctionTool, OllamaToolCall, get_models, show_model, stream_chat_completion,
};
pub use settings::OllamaAvailableModel as AvailableModel;
use settings::{Settings, SettingsStore, update_settings_file};
use std::pin::Pin;
use std::sync::LazyLock;
use std::sync::atomic::{AtomicU64, Ordering};
use std::{collections::HashMap, sync::Arc};
use ui::{ButtonLike, ElevationIndex, List, Tooltip, prelude::*};
use ui_input::InputField;
use zed_env_vars::{EnvVar, env_var};

use crate::AllLanguageModelSettings;
use crate::api_key::ApiKeyState;
use crate::ui::{ConfiguredApiCard, InstructionListItem};

const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";
const OLLAMA_SITE: &str = "https://ollama.com/";

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("ollama");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("Ollama");

const API_KEY_ENV_VAR_NAME: &str = "OLLAMA_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

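/// Settings for the Ollama provider: the server URL to talk to, plus any
/// user-declared model overrides (merged with fetched models in
/// `provided_models` below).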
#[derive(Default, Debug, Clone, PartialEq)]
pub struct OllamaSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

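/// Registers Ollama as a language model provider, handing out one
/// `OllamaLanguageModel` per model discovered on (or configured for) the
/// server.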
pub struct OllamaLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

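/// Shared provider state: the optional API key, the models fetched from the
/// server, and the in-flight fetch task, if any.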
pub struct State {
    api_key_state: ApiKeyState,
    http_client: Arc<dyn HttpClient>,
    fetched_models: Vec<ollama::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
}

impl State {
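    // "Authenticated" is approximated by a successful model fetch: a local
    // Ollama server needs no API key, so reachability is the real signal.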
    fn is_authenticated(&self) -> bool {
        !self.fetched_models.is_empty()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self
            .api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx);

        self.fetched_models.clear();
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self.api_key_state.load_if_needed(
            api_url,
            &API_KEY_ENV_VAR,
            |this| &mut this.api_key_state,
            cx,
        );

        // Always try to fetch models: a local Ollama server needs no API key,
        // a server with a valid key will succeed, and a server that needs a
        // key we don't have will fail gracefully.
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let http_client = Arc::clone(&self.http_client);
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let api_key = self.api_key_state.key(&api_url);

        // As a proxy for the server being "authenticated", we check that it's
        // up by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, api_key.as_deref()).await?;

            let tasks = models
                .into_iter()
                // Since there is no metadata from the Ollama API
                // indicating which models are embedding models,
                // simply filter out models with "-embed" in their name.
                .filter(|model| !model.name.contains("-embed"))
                .map(|model| {
                    let http_client = Arc::clone(&http_client);
                    let api_url = api_url.clone();
                    let api_key = api_key.clone();
                    async move {
                        let name = model.name.as_str();
                        let model =
                            show_model(http_client.as_ref(), &api_url, api_key.as_deref(), name)
                                .await?;
                        let ollama_model = ollama::Model::new(
                            name,
                            None,
                            model.context_length,
                            Some(model.supports_tools()),
                            Some(model.supports_vision()),
                            Some(model.supports_thinking()),
                        );
                        Ok(ollama_model)
                    }
                });

            // Rate-limit capability fetches,
            // since an arbitrary number of models may be available.
            let mut ollama_models: Vec<_> = futures::stream::iter(tasks)
                .buffer_unordered(5)
                .collect::<Vec<Result<_>>>()
                .await
                .into_iter()
                .collect::<Result<Vec<_>>>()?;

            ollama_models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.fetched_models = ollama_models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }
}

impl OllamaLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                cx.observe_global::<SettingsStore>({
                    let mut last_settings = OllamaLanguageModelProvider::settings(cx).clone();
                    move |this: &mut State, cx| {
                        let current_settings = OllamaLanguageModelProvider::settings(cx);
                        let settings_changed = current_settings != &last_settings;
                        if settings_changed {
                            let url_changed = last_settings.api_url != current_settings.api_url;
                            last_settings = current_settings.clone();
                            if url_changed {
                                this.fetched_models.clear();
                                this.authenticate(cx).detach();
                            }
                            cx.notify();
                        }
                    }
                })
                .detach();

                State {
                    http_client,
                    fetched_models: Default::default(),
                    fetch_model_task: None,
                    api_key_state: ApiKeyState::new(Self::api_url(cx)),
                }
            }),
        }
    }

    fn settings(cx: &App) -> &OllamaSettings {
        &AllLanguageModelSettings::get_global(cx).ollama
    }

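    /// The effective API URL: the configured value, or the default local
    /// endpoint when the setting is empty.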
    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            OLLAMA_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OllamaLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OllamaLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiOllama
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We don't select a default model, because that could trigger a load
        // call for a model that isn't resident. On a resource-constrained
        // machine, loading something by default would be poor UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See the explanation in default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: HashMap<String, ollama::Model> = HashMap::new();

        // Add models from the Ollama API.
        for model in self.state.read(cx).fetched_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override them with the available models from settings.
        for setting_model in &OllamaLanguageModelProvider::settings(cx).available_models {
            let setting_base = setting_model.name.split(':').next().unwrap();
            if let Some(model) = models
                .values_mut()
                .find(|m| m.name.split(':').next().unwrap() == setting_base)
            {
                model.max_tokens = setting_model.max_tokens;
                model.display_name = setting_model.display_name.clone();
                model.keep_alive = setting_model.keep_alive.clone();
                model.supports_tools = setting_model.supports_tools;
                model.supports_vision = setting_model.supports_images;
                model.supports_thinking = setting_model.supports_thinking;
            } else {
                models.insert(
                    setting_model.name.clone(),
                    ollama::Model {
                        name: setting_model.name.clone(),
                        display_name: setting_model.display_name.clone(),
                        max_tokens: setting_model.max_tokens,
                        keep_alive: setting_model.keep_alive.clone(),
                        supports_tools: setting_model.supports_tools,
                        supports_vision: setting_model.supports_images,
                        supports_thinking: setting_model.supports_thinking,
                    },
                );
            }
        }

        let mut models = models
            .into_values()
            .map(|model| {
                Arc::new(OllamaLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                    state: self.state.clone(),
                }) as Arc<dyn LanguageModel>
            })
            .collect::<Vec<_>>();
        models.sort_by_key(|model| model.name());
        models
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

pub struct OllamaLanguageModel {
    id: LanguageModelId,
    model: ollama::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
    state: Entity<State>,
}

impl OllamaLanguageModel {
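    /// Translates a provider-agnostic request into Ollama's chat format:
    /// tool results become `Tool` messages, assistant tool uses become
    /// `tool_calls`, and images are attached only when the model supports
    /// vision.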
    fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
        let supports_vision = self.model.supports_vision.unwrap_or(false);

        let mut messages = Vec::with_capacity(request.messages.len());

        for mut msg in request.messages.into_iter() {
            let images = if supports_vision {
                msg.content
                    .iter()
                    .filter_map(|content| match content {
                        MessageContent::Image(image) => Some(image.source.to_string()),
                        _ => None,
                    })
                    .collect::<Vec<String>>()
            } else {
                vec![]
            };

            match msg.role {
                Role::User => {
                    for tool_result in msg
                        .content
                        .extract_if(.., |x| matches!(x, MessageContent::ToolResult(..)))
                    {
                        match tool_result {
                            MessageContent::ToolResult(tool_result) => {
                                messages.push(ChatMessage::Tool {
                                    tool_name: tool_result.tool_name.to_string(),
                                    content: tool_result.content.to_str().unwrap_or("").to_string(),
                                })
                            }
                            _ => unreachable!("Only tool results should be extracted"),
                        }
                    }
                    if !msg.content.is_empty() {
                        messages.push(ChatMessage::User {
                            content: msg.string_contents(),
                            images: if images.is_empty() {
                                None
                            } else {
                                Some(images)
                            },
                        })
                    }
                }
                Role::Assistant => {
                    let content = msg.string_contents();
                    let mut thinking = None;
                    let mut tool_calls = Vec::new();
                    for content in msg.content.into_iter() {
                        match content {
                            MessageContent::Thinking { text, .. } if !text.is_empty() => {
                                thinking = Some(text)
                            }
                            MessageContent::ToolUse(tool_use) => {
                                tool_calls.push(OllamaToolCall {
                                    id: Some(tool_use.id.to_string()),
                                    function: OllamaFunctionCall {
                                        name: tool_use.name.to_string(),
                                        arguments: tool_use.input,
                                    },
                                });
                            }
                            _ => (),
                        }
                    }
                    messages.push(ChatMessage::Assistant {
                        content,
                        tool_calls: Some(tool_calls),
                        images: if images.is_empty() {
                            None
                        } else {
                            Some(images)
                        },
                        thinking,
                    })
                }
                Role::System => messages.push(ChatMessage::System {
                    content: msg.string_contents(),
                }),
            }
        }
        ChatRequest {
            model: self.model.name.clone(),
            messages,
            keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
            stream: true,
            options: Some(ChatOptions {
                num_ctx: Some(self.model.max_tokens),
                stop: Some(request.stop),
                temperature: request.temperature.or(Some(1.0)),
                ..Default::default()
            }),
            think: self
                .model
                .supports_thinking
                .map(|supports_thinking| supports_thinking && request.thinking_allowed),
            tools: if self.model.supports_tools.unwrap_or(false) {
                request.tools.into_iter().map(tool_into_ollama).collect()
            } else {
                vec![]
            },
        }
    }
}

impl LanguageModel for OllamaLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools.unwrap_or(false)
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision.unwrap_or(false)
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => false,
            LanguageModelToolChoice::Any => false,
            LanguageModelToolChoice::None => false,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("ollama/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // There is no endpoint for this _yet_ in Ollama;
        // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
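        // Fall back to a rough heuristic of ~4 characters per token, so e.g.
        // a 400-character conversation is counted as ~100 tokens.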
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().chars().count())
            .sum::<usize>()
            / 4;

        async move { Ok(token_count as u64) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let request = self.to_ollama_request(request);

        let http_client = self.http_client.clone();
        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OllamaLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped").into())).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let stream =
                stream_chat_completion(http_client.as_ref(), &api_url, api_key.as_deref(), request)
                    .await?;
            let stream = map_to_language_model_completion_events(stream);
            Ok(stream)
        });

        future.map_ok(|f| f.boxed()).boxed()
    }
}

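/// Adapts Ollama's delta stream into completion events. A single delta can
/// expand into several events (thinking, tool use, text), and the final delta
/// additionally yields a usage update plus a stop event.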
fn map_to_language_model_completion_events(
    stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Used for creating unique tool use ids.
    static TOOL_CALL_COUNTER: AtomicU64 = AtomicU64::new(0);

    struct State {
        stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
        used_tools: bool,
    }

    // We need to create both a ToolUse and a Stop event from a single
    // response of the original stream.
    let stream = stream::unfold(
        State {
            stream,
            used_tools: false,
        },
        async move |mut state| {
            let response = state.stream.next().await?;

            let delta = match response {
                Ok(delta) => delta,
                Err(e) => {
                    let event = Err(LanguageModelCompletionError::from(anyhow!(e)));
                    return Some((vec![event], state));
                }
            };

            let mut events = Vec::new();

            match delta.message {
                ChatMessage::User { content, images: _ } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::System { content } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Tool { content, .. } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Assistant {
                    content,
                    tool_calls,
                    images: _,
                    thinking,
                } => {
                    if let Some(text) = thinking {
                        events.push(Ok(LanguageModelCompletionEvent::Thinking {
                            text,
                            signature: None,
                        }));
                    }

                    if let Some(tool_call) = tool_calls.and_then(|v| v.into_iter().next()) {
                        let OllamaToolCall { id, function } = tool_call;
                        let id = id.unwrap_or_else(|| {
                            format!(
                                "{}-{}",
                                &function.name,
                                TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed)
                            )
                        });
                        let event = LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                            id: LanguageModelToolUseId::from(id),
                            name: Arc::from(function.name),
                            raw_input: function.arguments.to_string(),
                            input: function.arguments,
                            is_input_complete: true,
                        });
                        events.push(Ok(event));
                        state.used_tools = true;
                    } else if !content.is_empty() {
                        events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                    }
                }
            };

            if delta.done {
                events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                    input_tokens: delta.prompt_eval_count.unwrap_or(0),
                    output_tokens: delta.eval_count.unwrap_or(0),
                    cache_creation_input_tokens: 0,
                    cache_read_input_tokens: 0,
                })));
                if state.used_tools {
                    state.used_tools = false;
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                } else {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
            }

            Some((events, state))
        },
    );

    stream.flat_map(futures::stream::iter)
}

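/// Settings-panel view for the provider: editors for the API key and API URL,
/// plus connect/retry affordances.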
struct ConfigurationView {
    api_key_editor: Entity<InputField>,
    api_url_editor: Entity<InputField>,
    state: Entity<State>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| InputField::new(window, cx, "63e02e...").label("API key"));

        let api_url_editor = cx.new(|cx| {
            let input = InputField::new(window, cx, OLLAMA_API_URL).label("API URL");
            input.set_text(OllamaLanguageModelProvider::api_url(cx), window, cx);
            input
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        Self {
            api_key_editor,
            api_url_editor,
            state,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // Clear the editor, since URL changes can cause it to be displayed again.
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))?
                .await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn save_api_url(&mut self, cx: &mut Context<Self>) {
        let api_url = self.api_url_editor.read(cx).text(cx).trim().to_string();
        let current_url = OllamaLanguageModelProvider::api_url(cx);
        if !api_url.is_empty() && &api_url != &current_url {
            let fs = <dyn Fs>::global(cx);
            update_settings_file(fs, cx, move |settings, _| {
                settings
                    .language_models
                    .get_or_insert_default()
                    .ollama
                    .get_or_insert_default()
                    .api_url = Some(api_url);
            });
        }
    }

    fn reset_api_url(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_url_editor
            .update(cx, |input, cx| input.set_text("", window, cx));
        let fs = <dyn Fs>::global(cx);
        update_settings_file(fs, cx, |settings, _cx| {
            if let Some(settings) = settings
                .language_models
                .as_mut()
                .and_then(|models| models.ollama.as_mut())
            {
                settings.api_url = Some(OLLAMA_API_URL.into());
            }
        });
        cx.notify();
    }

    fn render_instructions() -> Div {
        v_flex()
            .gap_2()
            .child(Label::new(
                "Run LLMs locally on your machine with Ollama, or connect to an Ollama server. \
                 It can provide access to Llama, Mistral, Gemma, and hundreds of other models.",
            ))
            .child(Label::new("To use local Ollama:"))
            .child(
                List::new()
                    .child(InstructionListItem::new(
                        "Download and install Ollama from",
                        Some("ollama.com"),
                        Some("https://ollama.com/download"),
                    ))
                    .child(InstructionListItem::text_only(
                        "Start Ollama and download a model: `ollama run gpt-oss:20b`",
                    ))
                    .child(InstructionListItem::text_only(
                        "Click 'Connect' below to start using Ollama in Zed",
                    )),
            )
            .child(Label::new(
                "Alternatively, you can connect to an Ollama server by specifying its \
                 URL and API key (which may not be required):",
            ))
    }

    fn render_api_key_editor(&self, cx: &Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let env_var_set = state.api_key_state.is_from_env_var();
        let configured_card_label = if env_var_set {
            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable.")
        } else {
            "API key configured".to_string()
        };

        if !state.api_key_state.has_key() {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(
                        format!("You can also assign the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed.")
                    )
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .into_any_element()
        } else {
            ConfiguredApiCard::new(configured_card_label)
                .disabled(env_var_set)
                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
                .when(env_var_set, |this| {
                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
                })
                .into_any_element()
        }
    }

    fn render_api_url_editor(&self, cx: &Context<Self>) -> Div {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let custom_api_url_set = api_url != OLLAMA_API_URL;

        if custom_api_url_set {
            h_flex()
                .p_3()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().elevated_surface_background)
                .child(
                    h_flex()
                        .gap_2()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(v_flex().gap_1().child(Label::new(api_url))),
                )
                .child(
                    Button::new("reset-api-url", "Reset API URL")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .on_click(
                            cx.listener(|this, _, window, cx| this.reset_api_url(window, cx)),
                        ),
                )
        } else {
            v_flex()
                .on_action(cx.listener(|this, _: &menu::Confirm, _window, cx| {
                    this.save_api_url(cx);
                    cx.notify();
                }))
                .gap_2()
                .child(self.api_url_editor.clone())
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        v_flex()
            .gap_2()
            .child(Self::render_instructions())
            .child(self.render_api_url_editor(cx))
            .child(self.render_api_key_editor(cx))
            .child(
                h_flex()
                    .w_full()
                    .justify_between()
                    .gap_2()
                    .child(
                        h_flex()
                            .w_full()
                            .gap_2()
                            .map(|this| {
                                if is_authenticated {
                                    this.child(
                                        Button::new("ollama-site", "Ollama")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| cx.open_url(OLLAMA_SITE))
                                            .into_any_element(),
                                    )
                                } else {
                                    this.child(
                                        Button::new("download_ollama_button", "Download Ollama")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| {
                                                cx.open_url(OLLAMA_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                    )
                                }
                            })
                            .child(
                                Button::new("view-models", "View All Models")
                                    .style(ButtonStyle::Subtle)
                                    .icon(IconName::ArrowUpRight)
                                    .icon_size(IconSize::XSmall)
                                    .icon_color(Color::Muted)
                                    .on_click(move |_, _, cx| cx.open_url(OLLAMA_LIBRARY_URL)),
                            ),
                    )
                    .map(|this| {
                        if is_authenticated {
                            this.child(
                                ButtonLike::new("connected")
                                    .disabled(true)
                                    .cursor_style(CursorStyle::Arrow)
                                    .child(
                                        h_flex()
                                            .gap_2()
                                            .child(Icon::new(IconName::Check).color(Color::Success))
                                            .child(Label::new("Connected"))
                                            .into_any_element(),
                                    )
                                    .child(
                                        IconButton::new("refresh-models", IconName::RotateCcw)
                                            .tooltip(Tooltip::text("Refresh Models"))
                                            .on_click(cx.listener(|this, _, _, cx| {
                                                this.state.update(cx, |state, _| {
                                                    state.fetched_models.clear();
                                                });
                                                this.retry_connection(cx);
                                            })),
                                    ),
                            )
                        } else {
                            this.child(
                                Button::new("retry_ollama_models", "Connect")
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::XSmall)
                                    .icon(IconName::PlayOutlined)
                                    .on_click(
                                        cx.listener(move |this, _, _, cx| {
                                            this.retry_connection(cx)
                                        }),
                                    ),
                            )
                        }
                    }),
            )
    }
}

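/// Converts a provider-agnostic tool definition into Ollama's function-tool
/// wire format.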
fn tool_into_ollama(tool: LanguageModelRequestTool) -> ollama::OllamaTool {
    ollama::OllamaTool::Function {
        function: OllamaFunctionTool {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
        },
    }
}