use anyhow::{Result, anyhow};
use fs::Fs;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use futures::{Stream, TryFutureExt, stream};
use gpui::{AnyView, App, AsyncApp, Context, CursorStyle, Entity, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestTool, LanguageModelToolChoice, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use menu;
use ollama::{
    ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, OLLAMA_API_URL, OllamaFunctionCall,
    OllamaFunctionTool, OllamaToolCall, get_models, show_model, stream_chat_completion,
};
pub use settings::OllamaAvailableModel as AvailableModel;
use settings::{Settings, SettingsStore, update_settings_file};
use std::pin::Pin;
use std::sync::LazyLock;
use std::sync::atomic::{AtomicU64, Ordering};
use std::{collections::HashMap, sync::Arc};
use ui::{ButtonLike, ElevationIndex, List, Tooltip, prelude::*};
use ui_input::SingleLineInput;
use zed_env_vars::{EnvVar, env_var};

use crate::AllLanguageModelSettings;
use crate::api_key::ApiKeyState;
use crate::ui::InstructionListItem;

const OLLAMA_DOWNLOAD_URL: &str = "https://ollama.com/download";
const OLLAMA_LIBRARY_URL: &str = "https://ollama.com/library";
const OLLAMA_SITE: &str = "https://ollama.com/";

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("ollama");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("Ollama");

const API_KEY_ENV_VAR_NAME: &str = "OLLAMA_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

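/// Provider-level settings for Ollama.
///
/// A minimal, illustrative `settings.json` fragment (field names mirror
/// [`AvailableModel`]; treat the values as an example, not a schema
/// reference):
///
/// ```json
/// {
///     "language_models": {
///         "ollama": {
///             "api_url": "http://localhost:11434",
///             "available_models": [
///                 {
///                     "name": "llama3.1:8b",
///                     "display_name": "Llama 3.1 8B",
///                     "max_tokens": 32768,
///                     "supports_tools": true
///                 }
///             ]
///         }
///     }
/// }
/// ```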
#[derive(Default, Debug, Clone, PartialEq)]
pub struct OllamaSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

pub struct OllamaLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

pub struct State {
    api_key_state: ApiKeyState,
    http_client: Arc<dyn HttpClient>,
    fetched_models: Vec<ollama::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.fetched_models.is_empty()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self
            .api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx);

        self.fetched_models.clear();
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let task = self.api_key_state.load_if_needed(
            api_url,
            &API_KEY_ENV_VAR,
            |this| &mut this.api_key_state,
            cx,
        );

        // Always try to fetch models: if no API key is needed (local Ollama), the
        // fetch succeeds; if a key is needed and provided, it also succeeds; if a
        // key is needed but missing, the fetch fails gracefully.
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
                .ok();
            result
        })
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let http_client = Arc::clone(&self.http_client);
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let api_key = self.api_key_state.key(&api_url);

        // As a proxy for the server being "authenticated", we check that it's up
        // by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, api_key.as_deref()).await?;

            let tasks = models
                .into_iter()
                // Since the Ollama API provides no metadata indicating
                // which models are embedding models,
                // simply filter out models with "-embed" in their name.
                .filter(|model| !model.name.contains("-embed"))
                .map(|model| {
                    let http_client = Arc::clone(&http_client);
                    let api_url = api_url.clone();
                    let api_key = api_key.clone();
                    async move {
                        let name = model.name.as_str();
                        let model =
                            show_model(http_client.as_ref(), &api_url, api_key.as_deref(), name)
                                .await?;
                        let ollama_model = ollama::Model::new(
                            name,
                            None,
                            model.context_length,
                            Some(model.supports_tools()),
                            Some(model.supports_vision()),
                            Some(model.supports_thinking()),
                        );
                        Ok(ollama_model)
                    }
                });

            // Rate-limit capability fetches,
            // since an arbitrary number of models may be available.
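            // With e.g. 20 installed models, at most 5 `show_model` requests
            // are in flight at any one time.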
            let mut ollama_models: Vec<_> = futures::stream::iter(tasks)
                .buffer_unordered(5)
                .collect::<Vec<Result<_>>>()
                .await
                .into_iter()
                .collect::<Result<Vec<_>>>()?;

            ollama_models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.fetched_models = ollama_models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }
}

impl OllamaLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                cx.observe_global::<SettingsStore>({
                    let mut last_settings = OllamaLanguageModelProvider::settings(cx).clone();
                    move |this: &mut State, cx| {
                        let current_settings = OllamaLanguageModelProvider::settings(cx);
                        let settings_changed = current_settings != &last_settings;
                        if settings_changed {
                            let url_changed = last_settings.api_url != current_settings.api_url;
                            last_settings = current_settings.clone();
                            if url_changed {
                                this.fetched_models.clear();
                                this.authenticate(cx).detach();
                            }
                            cx.notify();
                        }
                    }
                })
                .detach();

                State {
                    http_client,
                    fetched_models: Default::default(),
                    fetch_model_task: None,
                    api_key_state: ApiKeyState::new(Self::api_url(cx)),
                }
            }),
        };
        this
    }

    fn settings(cx: &App) -> &OllamaSettings {
        &AllLanguageModelSettings::get_global(cx).ollama
    }

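    /// Returns the configured API URL, falling back to the crate-provided
    /// default (the local Ollama endpoint) when the setting is empty.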
    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            OLLAMA_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OllamaLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OllamaLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiOllama
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We shouldn't try to select a default model, because doing so can trigger
        // a load call for a model that isn't loaded. In a constrained environment
        // where the user may not have enough resources, loading something by
        // default would be bad UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See the explanation for default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: HashMap<String, ollama::Model> = HashMap::new();

        // Add models from the Ollama API
        for model in self.state.read(cx).fetched_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
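        // A settings entry matches a fetched model when their base names (the
        // part before ':') agree; e.g. an entry named "llama3.1" overrides a
        // fetched "llama3.1:8b".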
        for setting_model in &OllamaLanguageModelProvider::settings(cx).available_models {
            let setting_base = setting_model.name.split(':').next().unwrap();
            if let Some(model) = models
                .values_mut()
                .find(|m| m.name.split(':').next().unwrap() == setting_base)
            {
                model.max_tokens = setting_model.max_tokens;
                model.display_name = setting_model.display_name.clone();
                model.keep_alive = setting_model.keep_alive.clone();
                model.supports_tools = setting_model.supports_tools;
                model.supports_vision = setting_model.supports_images;
                model.supports_thinking = setting_model.supports_thinking;
            } else {
                models.insert(
                    setting_model.name.clone(),
                    ollama::Model {
                        name: setting_model.name.clone(),
                        display_name: setting_model.display_name.clone(),
                        max_tokens: setting_model.max_tokens,
                        keep_alive: setting_model.keep_alive.clone(),
                        supports_tools: setting_model.supports_tools,
                        supports_vision: setting_model.supports_images,
                        supports_thinking: setting_model.supports_thinking,
                    },
                );
            }
        }

        let mut models = models
            .into_values()
            .map(|model| {
                Arc::new(OllamaLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                    state: self.state.clone(),
                }) as Arc<dyn LanguageModel>
            })
            .collect::<Vec<_>>();
        models.sort_by_key(|model| model.name());
        models
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

pub struct OllamaLanguageModel {
    id: LanguageModelId,
    model: ollama::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
    state: Entity<State>,
}

impl OllamaLanguageModel {
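    /// Converts a provider-agnostic [`LanguageModelRequest`] into an Ollama
    /// [`ChatRequest`]. In rough outline:
    ///
    /// - Tool results embedded in user messages are split out into
    ///   `ChatMessage::Tool` entries ahead of the remaining user content.
    /// - Assistant thinking text and tool uses become the `thinking` and
    ///   `tool_calls` fields of `ChatMessage::Assistant`.
    /// - Images are forwarded only when the model reports vision support, and
    ///   tools only when it reports tool support.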
    fn to_ollama_request(&self, request: LanguageModelRequest) -> ChatRequest {
        let supports_vision = self.model.supports_vision.unwrap_or(false);

        let mut messages = Vec::with_capacity(request.messages.len());

        for mut msg in request.messages.into_iter() {
            let images = if supports_vision {
                msg.content
                    .iter()
                    .filter_map(|content| match content {
                        MessageContent::Image(image) => Some(image.source.to_string()),
                        _ => None,
                    })
                    .collect::<Vec<String>>()
            } else {
                vec![]
            };

            match msg.role {
                Role::User => {
                    for tool_result in msg
                        .content
                        .extract_if(.., |x| matches!(x, MessageContent::ToolResult(..)))
                    {
                        match tool_result {
                            MessageContent::ToolResult(tool_result) => {
                                messages.push(ChatMessage::Tool {
                                    tool_name: tool_result.tool_name.to_string(),
                                    content: tool_result.content.to_str().unwrap_or("").to_string(),
                                })
                            }
                            _ => unreachable!("Only tool results should be extracted"),
                        }
                    }
                    if !msg.content.is_empty() {
                        messages.push(ChatMessage::User {
                            content: msg.string_contents(),
                            images: if images.is_empty() {
                                None
                            } else {
                                Some(images)
                            },
                        })
                    }
                }
                Role::Assistant => {
                    let content = msg.string_contents();
                    let mut thinking = None;
                    let mut tool_calls = Vec::new();
                    for content in msg.content.into_iter() {
                        match content {
                            MessageContent::Thinking { text, .. } if !text.is_empty() => {
                                thinking = Some(text)
                            }
                            MessageContent::ToolUse(tool_use) => {
                                tool_calls.push(OllamaToolCall::Function(OllamaFunctionCall {
                                    name: tool_use.name.to_string(),
                                    arguments: tool_use.input,
                                }));
                            }
                            _ => (),
                        }
                    }
                    messages.push(ChatMessage::Assistant {
                        content,
                        tool_calls: Some(tool_calls),
                        images: if images.is_empty() {
                            None
                        } else {
                            Some(images)
                        },
                        thinking,
                    })
                }
                Role::System => messages.push(ChatMessage::System {
                    content: msg.string_contents(),
                }),
            }
        }
        ChatRequest {
            model: self.model.name.clone(),
            messages,
            keep_alive: self.model.keep_alive.clone().unwrap_or_default(),
            stream: true,
            options: Some(ChatOptions {
                num_ctx: Some(self.model.max_tokens),
                stop: Some(request.stop),
                temperature: request.temperature.or(Some(1.0)),
                ..Default::default()
            }),
            think: self
                .model
                .supports_thinking
                .map(|supports_thinking| supports_thinking && request.thinking_allowed),
            tools: if self.model.supports_tools.unwrap_or(false) {
                request.tools.into_iter().map(tool_into_ollama).collect()
            } else {
                vec![]
            },
        }
    }
}

impl LanguageModel for OllamaLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools.unwrap_or(false)
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision.unwrap_or(false)
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
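        // No forcing modes are supported; as of this writing, Ollama's chat
        // API exposes no tool-choice parameter.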
        match choice {
            LanguageModelToolChoice::Auto => false,
            LanguageModelToolChoice::Any => false,
            LanguageModelToolChoice::None => false,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("ollama/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // There is no endpoint for this _yet_ in Ollama
        // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
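        // Until then, estimate roughly four characters per token; e.g. a
        // 2,000-character conversation counts as ~500 tokens.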
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().chars().count())
            .sum::<usize>()
            / 4;

        async move { Ok(token_count as u64) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let request = self.to_ollama_request(request);

        let http_client = self.http_client.clone();
        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OllamaLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped").into())).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let stream =
                stream_chat_completion(http_client.as_ref(), &api_url, api_key.as_deref(), request)
                    .await?;
            let stream = map_to_language_model_completion_events(stream);
            Ok(stream)
        });

        future.map_ok(|f| f.boxed()).boxed()
    }
}

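/// Adapts the raw Ollama delta stream into the provider-agnostic completion
/// event stream. A single delta can expand into several events, so each delta
/// is mapped to a batch and the batches are flattened; e.g. the final delta of
/// a tool-calling turn yields a `UsageUpdate` followed by
/// `Stop(StopReason::ToolUse)`.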
fn map_to_language_model_completion_events(
    stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Used for creating unique tool use ids
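    // (Ollama does not return tool call ids, so we synthesize them as
    // "{tool_name}-{counter}", e.g. "get_weather-0", "get_weather-1", ...)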
    static TOOL_CALL_COUNTER: AtomicU64 = AtomicU64::new(0);

    struct State {
        stream: Pin<Box<dyn Stream<Item = anyhow::Result<ChatResponseDelta>> + Send>>,
        used_tools: bool,
    }

    // A single response from the original stream may need to produce multiple
    // events (e.g. a ToolUse followed by a Stop), so we unfold into batches of
    // events and flatten them below.
    let stream = stream::unfold(
        State {
            stream,
            used_tools: false,
        },
        async move |mut state| {
            let response = state.stream.next().await?;

            let delta = match response {
                Ok(delta) => delta,
                Err(e) => {
                    let event = Err(LanguageModelCompletionError::from(anyhow!(e)));
                    return Some((vec![event], state));
                }
            };

            let mut events = Vec::new();

            match delta.message {
                ChatMessage::User { content, images: _ } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::System { content } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Tool { content, .. } => {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
                ChatMessage::Assistant {
                    content,
                    tool_calls,
                    images: _,
                    thinking,
                } => {
                    if let Some(text) = thinking {
                        events.push(Ok(LanguageModelCompletionEvent::Thinking {
                            text,
                            signature: None,
                        }));
                    }

                    if let Some(tool_call) = tool_calls.and_then(|v| v.into_iter().next()) {
                        match tool_call {
                            OllamaToolCall::Function(function) => {
                                let tool_id = format!(
                                    "{}-{}",
                                    &function.name,
                                    TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed)
                                );
                                let event =
                                    LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                                        id: LanguageModelToolUseId::from(tool_id),
                                        name: Arc::from(function.name),
                                        raw_input: function.arguments.to_string(),
                                        input: function.arguments,
                                        is_input_complete: true,
                                    });
                                events.push(Ok(event));
                                state.used_tools = true;
                            }
                        }
                    } else if !content.is_empty() {
                        events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                    }
                }
            };

            if delta.done {
                events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                    input_tokens: delta.prompt_eval_count.unwrap_or(0),
                    output_tokens: delta.eval_count.unwrap_or(0),
                    cache_creation_input_tokens: 0,
                    cache_read_input_tokens: 0,
                })));
                if state.used_tools {
                    state.used_tools = false;
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                } else {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
            }

            Some((events, state))
        },
    );

    stream.flat_map(futures::stream::iter)
}

struct ConfigurationView {
    api_key_editor: Entity<SingleLineInput>,
    api_url_editor: Entity<SingleLineInput>,
    state: Entity<State>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor =
            cx.new(|cx| SingleLineInput::new(window, cx, "63e02e...").label("API key"));

        let api_url_editor = cx.new(|cx| {
            let input = SingleLineInput::new(window, cx, OLLAMA_API_URL).label("API URL");
            input.set_text(OllamaLanguageModelProvider::api_url(cx), window, cx);
            input
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        Self {
            api_key_editor,
            api_url_editor,
            state,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // URL changes can cause the editor to be displayed again
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))?
                .await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn save_api_url(&mut self, cx: &mut Context<Self>) {
        let api_url = self.api_url_editor.read(cx).text(cx).trim().to_string();
        let current_url = OllamaLanguageModelProvider::api_url(cx);
        if !api_url.is_empty() && &api_url != &current_url {
            let fs = <dyn Fs>::global(cx);
            update_settings_file(fs, cx, move |settings, _| {
                settings
                    .language_models
                    .get_or_insert_default()
                    .ollama
                    .get_or_insert_default()
                    .api_url = Some(api_url);
            });
        }
    }

    fn reset_api_url(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_url_editor
            .update(cx, |input, cx| input.set_text("", window, cx));
        let fs = <dyn Fs>::global(cx);
        update_settings_file(fs, cx, |settings, _cx| {
            if let Some(settings) = settings
                .language_models
                .as_mut()
                .and_then(|models| models.ollama.as_mut())
            {
                settings.api_url = Some(OLLAMA_API_URL.into());
            }
        });
        cx.notify();
    }

    fn render_instructions() -> Div {
        v_flex()
            .gap_2()
            .child(Label::new(
                "Run LLMs locally on your machine with Ollama, or connect to an Ollama server. \
                 It can provide access to Llama, Mistral, Gemma, and hundreds of other models.",
            ))
            .child(Label::new("To use local Ollama:"))
            .child(
                List::new()
                    .child(InstructionListItem::new(
                        "Download and install Ollama from",
                        Some("ollama.com"),
                        Some("https://ollama.com/download"),
                    ))
                    .child(InstructionListItem::text_only(
                        "Start Ollama and download a model: `ollama run gpt-oss:20b`",
                    ))
                    .child(InstructionListItem::text_only(
                        "Click 'Connect' below to start using Ollama in Zed",
                    )),
            )
            .child(Label::new(
                "Alternatively, you can connect to an Ollama server by specifying its \
                 URL and an API key (which may not be required):",
            ))
    }

    fn render_api_key_editor(&self, cx: &Context<Self>) -> Div {
        let state = self.state.read(cx);
        let env_var_set = state.api_key_state.is_from_env_var();

        if !state.api_key_state.has_key() {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(
                        format!("You can also assign the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed.")
                    )
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
        } else {
            h_flex()
                .p_3()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().elevated_surface_background)
                .child(
                    h_flex()
                        .gap_2()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(
                            Label::new(
                                if env_var_set {
                                    format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable.")
                                } else {
                                    "API key configured".to_string()
                                }
                            )
                        )
                )
                .child(
                    Button::new("reset-api-key", "Reset API Key")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .when(env_var_set, |this| {
                            this.tooltip(Tooltip::text(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable.")))
                        })
                        .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx))),
                )
        }
    }

    fn render_api_url_editor(&self, cx: &Context<Self>) -> Div {
        let api_url = OllamaLanguageModelProvider::api_url(cx);
        let custom_api_url_set = api_url != OLLAMA_API_URL;

        if custom_api_url_set {
            h_flex()
                .p_3()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().elevated_surface_background)
                .child(
                    h_flex()
                        .gap_2()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(v_flex().gap_1().child(Label::new(api_url))),
                )
                .child(
                    Button::new("reset-api-url", "Reset API URL")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .on_click(
                            cx.listener(|this, _, window, cx| this.reset_api_url(window, cx)),
                        ),
                )
        } else {
            v_flex()
                .on_action(cx.listener(|this, _: &menu::Confirm, _window, cx| {
                    this.save_api_url(cx);
                    cx.notify();
                }))
                .gap_2()
                .child(self.api_url_editor.clone())
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        v_flex()
            .gap_2()
            .child(Self::render_instructions())
            .child(self.render_api_url_editor(cx))
            .child(self.render_api_key_editor(cx))
            .child(
                h_flex()
                    .w_full()
                    .justify_between()
                    .gap_2()
                    .child(
                        h_flex()
                            .w_full()
                            .gap_2()
                            .map(|this| {
                                if is_authenticated {
                                    this.child(
                                        Button::new("ollama-site", "Ollama")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| cx.open_url(OLLAMA_SITE))
                                            .into_any_element(),
                                    )
                                } else {
                                    this.child(
                                        Button::new("download_ollama_button", "Download Ollama")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _, cx| {
                                                cx.open_url(OLLAMA_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                    )
                                }
                            })
                            .child(
                                Button::new("view-models", "View All Models")
                                    .style(ButtonStyle::Subtle)
                                    .icon(IconName::ArrowUpRight)
                                    .icon_size(IconSize::XSmall)
                                    .icon_color(Color::Muted)
                                    .on_click(move |_, _, cx| cx.open_url(OLLAMA_LIBRARY_URL)),
                            ),
                    )
                    .map(|this| {
                        if is_authenticated {
                            this.child(
                                ButtonLike::new("connected")
                                    .disabled(true)
                                    .cursor_style(CursorStyle::Arrow)
                                    .child(
                                        h_flex()
                                            .gap_2()
                                            .child(Icon::new(IconName::Check).color(Color::Success))
                                            .child(Label::new("Connected"))
                                            .into_any_element(),
                                    ),
                            )
                        } else {
                            this.child(
                                Button::new("retry_ollama_models", "Connect")
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::XSmall)
                                    .icon(IconName::PlayOutlined)
                                    .on_click(
                                        cx.listener(move |this, _, _, cx| {
                                            this.retry_connection(cx)
                                        }),
                                    ),
                            )
                        }
                    }),
            )
    }
}

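/// Converts a provider-agnostic tool definition into Ollama's function-tool
/// format. On the wire this serializes to roughly
/// `{"type": "function", "function": {"name": ..., "description": ..., "parameters": {...}}}`,
/// where `parameters` carries the tool's JSON input schema.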
fn tool_into_ollama(tool: LanguageModelRequestTool) -> ollama::OllamaTool {
    ollama::OllamaTool::Function {
        function: OllamaFunctionTool {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
        },
    }
}