1use std::sync::Arc;
2
3use anyhow::Result;
4use collections::HashSet;
5use fs::Fs;
6use gpui::{
7 DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, Render, ScrollHandle, Task,
8};
9use language_model::LanguageModelRegistry;
10use language_models::provider::open_ai_compatible::{AvailableModel, ModelCapabilities};
11use settings::{OpenAiCompatibleSettingsContent, update_settings_file};
12use ui::{
13 Banner, Checkbox, KeyBinding, Modal, ModalFooter, ModalHeader, Section, ToggleState,
14 WithScrollbar, prelude::*,
15};
16use ui_input::InputField;
17use workspace::{ModalView, Workspace};
18
19fn single_line_input(
20 label: impl Into<SharedString>,
21 placeholder: &str,
22 text: Option<&str>,
23 tab_index: isize,
24 window: &mut Window,
25 cx: &mut App,
26) -> Entity<InputField> {
27 cx.new(|cx| {
28 let input = InputField::new(window, cx, placeholder)
29 .label(label)
30 .tab_index(tab_index)
31 .tab_stop(true);
32
33 if let Some(text) = text {
34 input.set_text(text, window, cx);
35 }
36 input
37 })
38}
39
/// Provider flavors whose APIs are OpenAI-compatible and can be configured
/// through the "Add LLM Provider" modal.
#[derive(Clone, Copy)]
pub enum LlmCompatibleProvider {
    OpenAi,
}
44
45impl LlmCompatibleProvider {
46 fn name(&self) -> &'static str {
47 match self {
48 LlmCompatibleProvider::OpenAi => "OpenAI",
49 }
50 }
51
52 fn api_url(&self) -> &'static str {
53 match self {
54 LlmCompatibleProvider::OpenAi => "https://api.openai.com/v1",
55 }
56 }
57}
58
/// Editable state backing the "Add LLM Provider" modal: one input field per
/// provider-level setting plus a list of model rows.
struct AddLlmProviderInput {
    provider_name: Entity<InputField>,
    api_url: Entity<InputField>,
    api_key: Entity<InputField>,
    models: Vec<ModelInput>,
}
65
impl AddLlmProviderInput {
    /// Creates the modal's input state, seeded with the provider's default
    /// name/URL as placeholders and a single empty model row.
    fn new(provider: LlmCompatibleProvider, window: &mut Window, cx: &mut App) -> Self {
        // Tab indices 1-3 cover the provider-level fields; model rows derive
        // their own indices from the row index (see `ModelInput::new`).
        let provider_name =
            single_line_input("Provider Name", provider.name(), None, 1, window, cx);
        let api_url = single_line_input("API URL", provider.api_url(), None, 2, window, cx);
        // Placeholder imitates the shape/length of a typical API key.
        let api_key = single_line_input(
            "API Key",
            "000000000000000000000000000000000000000000000000",
            None,
            3,
            window,
            cx,
        );

        Self {
            provider_name,
            api_url,
            api_key,
            models: vec![ModelInput::new(0, window, cx)],
        }
    }

    /// Appends a new, empty model row after the existing ones.
    fn add_model(&mut self, window: &mut Window, cx: &mut App) {
        let model_index = self.models.len();
        self.models.push(ModelInput::new(model_index, window, cx));
    }

    /// Removes the model row at `index`; panics if `index` is out of bounds.
    // NOTE(review): rows after `index` keep the tab indices they were created
    // with, so tab order has gaps after a removal — confirm this is acceptable.
    fn remove_model(&mut self, index: usize) {
        self.models.remove(index);
    }
}
97
/// Checkbox state for each model capability, mirroring the boolean fields of
/// `ModelCapabilities` as `ToggleState`s.
struct ModelCapabilityToggles {
    pub supports_tools: ToggleState,
    pub supports_images: ToggleState,
    pub supports_parallel_tool_calls: ToggleState,
    pub supports_prompt_cache_key: ToggleState,
    pub supports_chat_completions: ToggleState,
}
105
/// Editable state for a single model row in the modal.
struct ModelInput {
    name: Entity<InputField>,
    max_completion_tokens: Entity<InputField>,
    max_output_tokens: Entity<InputField>,
    max_tokens: Entity<InputField>,
    capabilities: ModelCapabilityToggles,
}
113
114impl ModelInput {
115 fn new(model_index: usize, window: &mut Window, cx: &mut App) -> Self {
116 let base_tab_index = (3 + (model_index * 4)) as isize;
117
118 let model_name = single_line_input(
119 "Model Name",
120 "e.g. gpt-5, claude-opus-4, gemini-2.5-pro",
121 None,
122 base_tab_index + 1,
123 window,
124 cx,
125 );
126 let max_completion_tokens = single_line_input(
127 "Max Completion Tokens",
128 "200000",
129 Some("200000"),
130 base_tab_index + 2,
131 window,
132 cx,
133 );
134 let max_output_tokens = single_line_input(
135 "Max Output Tokens",
136 "Max Output Tokens",
137 Some("32000"),
138 base_tab_index + 3,
139 window,
140 cx,
141 );
142 let max_tokens = single_line_input(
143 "Max Tokens",
144 "Max Tokens",
145 Some("200000"),
146 base_tab_index + 4,
147 window,
148 cx,
149 );
150
151 let ModelCapabilities {
152 tools,
153 images,
154 parallel_tool_calls,
155 prompt_cache_key,
156 chat_completions,
157 } = ModelCapabilities::default();
158
159 Self {
160 name: model_name,
161 max_completion_tokens,
162 max_output_tokens,
163 max_tokens,
164 capabilities: ModelCapabilityToggles {
165 supports_tools: tools.into(),
166 supports_images: images.into(),
167 supports_parallel_tool_calls: parallel_tool_calls.into(),
168 supports_prompt_cache_key: prompt_cache_key.into(),
169 supports_chat_completions: chat_completions.into(),
170 },
171 }
172 }
173
174 fn parse(&self, cx: &App) -> Result<AvailableModel, SharedString> {
175 let name = self.name.read(cx).text(cx);
176 if name.is_empty() {
177 return Err(SharedString::from("Model Name cannot be empty"));
178 }
179 Ok(AvailableModel {
180 name,
181 display_name: None,
182 max_completion_tokens: Some(
183 self.max_completion_tokens
184 .read(cx)
185 .text(cx)
186 .parse::<u64>()
187 .map_err(|_| SharedString::from("Max Completion Tokens must be a number"))?,
188 ),
189 max_output_tokens: Some(
190 self.max_output_tokens
191 .read(cx)
192 .text(cx)
193 .parse::<u64>()
194 .map_err(|_| SharedString::from("Max Output Tokens must be a number"))?,
195 ),
196 max_tokens: self
197 .max_tokens
198 .read(cx)
199 .text(cx)
200 .parse::<u64>()
201 .map_err(|_| SharedString::from("Max Tokens must be a number"))?,
202 capabilities: ModelCapabilities {
203 tools: self.capabilities.supports_tools.selected(),
204 images: self.capabilities.supports_images.selected(),
205 parallel_tool_calls: self.capabilities.supports_parallel_tool_calls.selected(),
206 prompt_cache_key: self.capabilities.supports_prompt_cache_key.selected(),
207 chat_completions: self.capabilities.supports_chat_completions.selected(),
208 },
209 })
210 }
211}
212
213fn save_provider_to_settings(
214 input: &AddLlmProviderInput,
215 cx: &mut App,
216) -> Task<Result<(), SharedString>> {
217 let provider_name: Arc<str> = input.provider_name.read(cx).text(cx).into();
218 if provider_name.is_empty() {
219 return Task::ready(Err("Provider Name cannot be empty".into()));
220 }
221
222 if LanguageModelRegistry::read_global(cx)
223 .providers()
224 .iter()
225 .any(|provider| {
226 provider.id().0.as_ref() == provider_name.as_ref()
227 || provider.name().0.as_ref() == provider_name.as_ref()
228 })
229 {
230 return Task::ready(Err(
231 "Provider Name is already taken by another provider".into()
232 ));
233 }
234
235 let api_url = input.api_url.read(cx).text(cx);
236 if api_url.is_empty() {
237 return Task::ready(Err("API URL cannot be empty".into()));
238 }
239
240 let api_key = input.api_key.read(cx).text(cx);
241 if api_key.is_empty() {
242 return Task::ready(Err("API Key cannot be empty".into()));
243 }
244
245 let mut models = Vec::new();
246 let mut model_names: HashSet<String> = HashSet::default();
247 for model in &input.models {
248 match model.parse(cx) {
249 Ok(model) => {
250 if !model_names.insert(model.name.clone()) {
251 return Task::ready(Err("Model Names must be unique".into()));
252 }
253 models.push(model)
254 }
255 Err(err) => return Task::ready(Err(err)),
256 }
257 }
258
259 let fs = <dyn Fs>::global(cx);
260 let task = cx.write_credentials(&api_url, "Bearer", api_key.as_bytes());
261 cx.spawn(async move |cx| {
262 task.await
263 .map_err(|_| SharedString::from("Failed to write API key to keychain"))?;
264 cx.update(|cx| {
265 update_settings_file(fs, cx, |settings, _cx| {
266 settings
267 .language_models
268 .get_or_insert_default()
269 .openai_compatible
270 .get_or_insert_default()
271 .insert(
272 provider_name,
273 OpenAiCompatibleSettingsContent {
274 api_url,
275 available_models: models,
276 },
277 );
278 });
279 });
280 Ok(())
281 })
282}
283
/// Modal for configuring a new OpenAI-compatible language model provider.
pub struct AddLlmProviderModal {
    provider: LlmCompatibleProvider,
    input: AddLlmProviderInput,
    scroll_handle: ScrollHandle,
    focus_handle: FocusHandle,
    // Last validation/save error, rendered in a warning banner when present.
    last_error: Option<SharedString>,
}
291
impl AddLlmProviderModal {
    /// Toggles this modal on the given workspace.
    pub fn toggle(
        provider: LlmCompatibleProvider,
        workspace: &mut Workspace,
        window: &mut Window,
        cx: &mut Context<Workspace>,
    ) {
        workspace.toggle_modal(window, cx, |window, cx| Self::new(provider, window, cx));
    }

    fn new(provider: LlmCompatibleProvider, window: &mut Window, cx: &mut Context<Self>) -> Self {
        Self {
            input: AddLlmProviderInput::new(provider, window, cx),
            provider,
            last_error: None,
            focus_handle: cx.focus_handle(),
            scroll_handle: ScrollHandle::new(),
        }
    }

    /// Kicks off the save; dismisses the modal on success, or stores the
    /// error so the banner shows it on failure.
    fn confirm(&mut self, _: &menu::Confirm, _: &mut Window, cx: &mut Context<Self>) {
        let task = save_provider_to_settings(&self.input, cx);
        cx.spawn(async move |this, cx| {
            let result = task.await;
            this.update(cx, |this, cx| match result {
                Ok(_) => {
                    cx.emit(DismissEvent);
                }
                Err(error) => {
                    this.last_error = Some(error);
                    cx.notify();
                }
            })
        })
        .detach_and_log_err(cx);
    }

    /// Dismisses the modal without saving.
    fn cancel(&mut self, _: &menu::Cancel, _: &mut Window, cx: &mut Context<Self>) {
        cx.emit(DismissEvent);
    }

    /// Renders the "Models" header (with the Add Model button) followed by
    /// one card per model row.
    fn render_model_section(&self, cx: &mut Context<Self>) -> impl IntoElement {
        v_flex()
            .mt_1()
            .gap_2()
            .child(
                h_flex()
                    .justify_between()
                    .child(Label::new("Models").size(LabelSize::Small))
                    .child(
                        Button::new("add-model", "Add Model")
                            .icon(IconName::Plus)
                            .icon_position(IconPosition::Start)
                            .icon_size(IconSize::XSmall)
                            .icon_color(Color::Muted)
                            .label_size(LabelSize::Small)
                            .on_click(cx.listener(|this, _, window, cx| {
                                this.input.add_model(window, cx);
                                cx.notify();
                            })),
                    ),
            )
            .children(
                self.input
                    .models
                    .iter()
                    .enumerate()
                    .map(|(ix, _)| self.render_model(ix, cx)),
            )
    }

    /// Renders one model row card: name, token-limit fields, the capability
    /// checkboxes, and (when more than one row exists) a remove button.
    /// The click listeners capture `ix` to address the right row later.
    fn render_model(&self, ix: usize, cx: &mut Context<Self>) -> impl IntoElement + use<> {
        let has_more_than_one_model = self.input.models.len() > 1;
        let model = &self.input.models[ix];

        v_flex()
            .p_2()
            .gap_2()
            .rounded_sm()
            .border_1()
            .border_dashed()
            .border_color(cx.theme().colors().border.opacity(0.6))
            .bg(cx.theme().colors().element_active.opacity(0.15))
            .child(model.name.clone())
            .child(
                h_flex()
                    .gap_2()
                    .child(model.max_completion_tokens.clone())
                    .child(model.max_output_tokens.clone()),
            )
            .child(model.max_tokens.clone())
            .child(
                v_flex()
                    .gap_1()
                    .child(
                        Checkbox::new(("supports-tools", ix), model.capabilities.supports_tools)
                            .label("Supports tools")
                            .on_click(cx.listener(move |this, checked, _window, cx| {
                                this.input.models[ix].capabilities.supports_tools = *checked;
                                cx.notify();
                            })),
                    )
                    .child(
                        Checkbox::new(("supports-images", ix), model.capabilities.supports_images)
                            .label("Supports images")
                            .on_click(cx.listener(move |this, checked, _window, cx| {
                                this.input.models[ix].capabilities.supports_images = *checked;
                                cx.notify();
                            })),
                    )
                    .child(
                        Checkbox::new(
                            ("supports-parallel-tool-calls", ix),
                            model.capabilities.supports_parallel_tool_calls,
                        )
                        .label("Supports parallel_tool_calls")
                        .on_click(cx.listener(
                            move |this, checked, _window, cx| {
                                this.input.models[ix]
                                    .capabilities
                                    .supports_parallel_tool_calls = *checked;
                                cx.notify();
                            },
                        )),
                    )
                    .child(
                        Checkbox::new(
                            ("supports-prompt-cache-key", ix),
                            model.capabilities.supports_prompt_cache_key,
                        )
                        .label("Supports prompt_cache_key")
                        .on_click(cx.listener(
                            move |this, checked, _window, cx| {
                                this.input.models[ix].capabilities.supports_prompt_cache_key =
                                    *checked;
                                cx.notify();
                            },
                        )),
                    )
                    .child(
                        Checkbox::new(
                            ("supports-chat-completions", ix),
                            model.capabilities.supports_chat_completions,
                        )
                        .label("Supports /chat/completions")
                        .on_click(cx.listener(
                            move |this, checked, _window, cx| {
                                this.input.models[ix].capabilities.supports_chat_completions =
                                    *checked;
                                cx.notify();
                            },
                        )),
                    ),
            )
            .when(has_more_than_one_model, |this| {
                this.child(
                    Button::new(("remove-model", ix), "Remove Model")
                        .icon(IconName::Trash)
                        .icon_position(IconPosition::Start)
                        .icon_size(IconSize::XSmall)
                        .icon_color(Color::Muted)
                        .label_size(LabelSize::Small)
                        .style(ButtonStyle::Outlined)
                        .full_width()
                        .on_click(cx.listener(move |this, _, _window, cx| {
                            this.input.remove_model(ix);
                            cx.notify();
                        })),
                )
            })
    }

    /// Moves focus to the next tab stop.
    fn on_tab(&mut self, _: &menu::SelectNext, window: &mut Window, cx: &mut Context<Self>) {
        window.focus_next(cx);
    }

    /// Moves focus to the previous tab stop.
    fn on_tab_prev(
        &mut self,
        _: &menu::SelectPrevious,
        window: &mut Window,
        cx: &mut Context<Self>,
    ) {
        window.focus_prev(cx);
    }
}
477
// Emitting `DismissEvent` is how the modal asks the workspace to close it.
impl EventEmitter<DismissEvent> for AddLlmProviderModal {}

impl Focusable for AddLlmProviderModal {
    fn focus_handle(&self, _cx: &App) -> FocusHandle {
        self.focus_handle.clone()
    }
}

// Marker impl required by `Workspace::toggle_modal`.
impl ModalView for AddLlmProviderModal {}
487
impl Render for AddLlmProviderModal {
    fn render(&mut self, window: &mut ui::Window, cx: &mut ui::Context<Self>) -> impl IntoElement {
        let focus_handle = self.focus_handle(cx);

        // Give the scrollable content more vertical room on taller windows
        // (threshold: viewport taller than 600px expressed in rems).
        let window_size = window.viewport_size();
        let rem_size = window.rem_size();
        let is_large_window = window_size.height / rem_size > rems_from_px(600.).0;

        let modal_max_height = if is_large_window {
            rems_from_px(450.)
        } else {
            rems_from_px(200.)
        };

        v_flex()
            .id("add-llm-provider-modal")
            .key_context("AddLlmProviderModal")
            .w(rems(34.))
            .elevation_3(cx)
            .on_action(cx.listener(Self::cancel))
            .on_action(cx.listener(Self::on_tab))
            .on_action(cx.listener(Self::on_tab_prev))
            // Clicking anywhere inside refocuses the modal so the action
            // bindings above stay active.
            .capture_any_mouse_down(cx.listener(|this, _, window, cx| {
                this.focus_handle(cx).focus(window, cx);
            }))
            .child(
                // NOTE(review): the element id "configure-context-server" looks
                // copied from another modal — confirm whether it should be a
                // provider-specific id.
                Modal::new("configure-context-server", None)
                    .header(ModalHeader::new().headline("Add LLM Provider").description(
                        match self.provider {
                            LlmCompatibleProvider::OpenAi => {
                                "This provider will use an OpenAI compatible API."
                            }
                        },
                    ))
                    // Show the most recent save/validation error, if any.
                    .when_some(self.last_error.clone(), |this, error| {
                        this.section(
                            Section::new().child(
                                Banner::new()
                                    .severity(Severity::Warning)
                                    .child(div().text_xs().child(error)),
                            ),
                        )
                    })
                    .child(
                        div()
                            .size_full()
                            .vertical_scrollbar_for(&self.scroll_handle, window, cx)
                            .child(
                                v_flex()
                                    .id("modal_content")
                                    .size_full()
                                    .tab_group()
                                    .max_h(modal_max_height)
                                    .pl_3()
                                    .pr_4()
                                    .pb_2()
                                    .gap_2()
                                    .overflow_y_scroll()
                                    .track_scroll(&self.scroll_handle)
                                    .child(self.input.provider_name.clone())
                                    .child(self.input.api_url.clone())
                                    .child(self.input.api_key.clone())
                                    .child(self.render_model_section(cx)),
                            ),
                    )
                    .footer(
                        ModalFooter::new().end_slot(
                            h_flex()
                                .gap_1()
                                .child(
                                    Button::new("cancel", "Cancel")
                                        .key_binding(
                                            KeyBinding::for_action_in(
                                                &menu::Cancel,
                                                &focus_handle,
                                                cx,
                                            )
                                            .map(|kb| kb.size(rems_from_px(12.))),
                                        )
                                        .on_click(cx.listener(|this, _event, window, cx| {
                                            this.cancel(&menu::Cancel, window, cx)
                                        })),
                                )
                                .child(
                                    Button::new("save-server", "Save Provider")
                                        .key_binding(
                                            KeyBinding::for_action_in(
                                                &menu::Confirm,
                                                &focus_handle,
                                                cx,
                                            )
                                            .map(|kb| kb.size(rems_from_px(12.))),
                                        )
                                        .on_click(cx.listener(|this, _event, window, cx| {
                                            this.confirm(&menu::Confirm, window, cx)
                                        })),
                                ),
                        ),
                    ),
            )
    }
}
590
#[cfg(test)]
mod tests {
    use super::*;
    use fs::FakeFs;
    use gpui::{TestAppContext, VisualTestContext};
    use language_model::{
        LanguageModelProviderId, LanguageModelProviderName,
        fake_provider::FakeLanguageModelProvider,
    };
    use project::Project;
    use settings::SettingsStore;
    use util::path;
    use workspace::MultiWorkspace;

    // Exercises each validation branch of `save_provider_to_settings`,
    // relying on the documented validation order.
    #[gpui::test]
    async fn test_save_provider_invalid_inputs(cx: &mut TestAppContext) {
        let cx = setup_test(cx).await;

        assert_eq!(
            save_provider_validation_errors("", "someurl", "somekey", vec![], cx,).await,
            Some("Provider Name cannot be empty".into())
        );

        assert_eq!(
            save_provider_validation_errors("someprovider", "", "somekey", vec![], cx,).await,
            Some("API URL cannot be empty".into())
        );

        assert_eq!(
            save_provider_validation_errors("someprovider", "someurl", "", vec![], cx,).await,
            Some("API Key cannot be empty".into())
        );

        assert_eq!(
            save_provider_validation_errors(
                "someprovider",
                "someurl",
                "somekey",
                vec![("", "200000", "200000", "32000")],
                cx,
            )
            .await,
            Some("Model Name cannot be empty".into())
        );

        // Model tuples are (name, max_tokens, max_completion_tokens,
        // max_output_tokens) — see `save_provider_validation_errors`.
        assert_eq!(
            save_provider_validation_errors(
                "someprovider",
                "someurl",
                "somekey",
                vec![("somemodel", "abc", "200000", "32000")],
                cx,
            )
            .await,
            Some("Max Tokens must be a number".into())
        );

        assert_eq!(
            save_provider_validation_errors(
                "someprovider",
                "someurl",
                "somekey",
                vec![("somemodel", "200000", "abc", "32000")],
                cx,
            )
            .await,
            Some("Max Completion Tokens must be a number".into())
        );

        assert_eq!(
            save_provider_validation_errors(
                "someprovider",
                "someurl",
                "somekey",
                vec![("somemodel", "200000", "200000", "abc")],
                cx,
            )
            .await,
            Some("Max Output Tokens must be a number".into())
        );

        assert_eq!(
            save_provider_validation_errors(
                "someprovider",
                "someurl",
                "somekey",
                vec![
                    ("somemodel", "200000", "200000", "32000"),
                    ("somemodel", "200000", "200000", "32000"),
                ],
                cx,
            )
            .await,
            Some("Model Names must be unique".into())
        );
    }

    // A name matching an already-registered provider must be rejected.
    #[gpui::test]
    async fn test_save_provider_name_conflict(cx: &mut TestAppContext) {
        let cx = setup_test(cx).await;

        cx.update(|_window, cx| {
            LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
                registry.register_provider(
                    Arc::new(FakeLanguageModelProvider::new(
                        LanguageModelProviderId::new("someprovider"),
                        LanguageModelProviderName::new("Some Provider"),
                    )),
                    cx,
                );
            });
        });

        assert_eq!(
            save_provider_validation_errors(
                "someprovider",
                "someurl",
                "someapikey",
                vec![("somemodel", "200000", "200000", "32000")],
                cx,
            )
            .await,
            Some("Provider Name is already taken by another provider".into())
        );
    }

    // Fresh rows must reflect `ModelCapabilities::default()` in both the
    // checkbox state and the parsed model.
    #[gpui::test]
    async fn test_model_input_default_capabilities(cx: &mut TestAppContext) {
        let cx = setup_test(cx).await;

        cx.update(|window, cx| {
            let model_input = ModelInput::new(0, window, cx);
            model_input.name.update(cx, |input, cx| {
                input.set_text("somemodel", window, cx);
            });
            assert_eq!(
                model_input.capabilities.supports_tools,
                ToggleState::Selected
            );
            assert_eq!(
                model_input.capabilities.supports_images,
                ToggleState::Unselected
            );
            assert_eq!(
                model_input.capabilities.supports_parallel_tool_calls,
                ToggleState::Unselected
            );
            assert_eq!(
                model_input.capabilities.supports_prompt_cache_key,
                ToggleState::Unselected
            );
            assert_eq!(
                model_input.capabilities.supports_chat_completions,
                ToggleState::Selected
            );

            let parsed_model = model_input.parse(cx).unwrap();
            assert!(parsed_model.capabilities.tools);
            assert!(!parsed_model.capabilities.images);
            assert!(!parsed_model.capabilities.parallel_tool_calls);
            assert!(!parsed_model.capabilities.prompt_cache_key);
            assert!(parsed_model.capabilities.chat_completions);
        });
    }

    // Unchecking every capability must carry through to the parsed model.
    #[gpui::test]
    async fn test_model_input_deselected_capabilities(cx: &mut TestAppContext) {
        let cx = setup_test(cx).await;

        cx.update(|window, cx| {
            let mut model_input = ModelInput::new(0, window, cx);
            model_input.name.update(cx, |input, cx| {
                input.set_text("somemodel", window, cx);
            });

            model_input.capabilities.supports_tools = ToggleState::Unselected;
            model_input.capabilities.supports_images = ToggleState::Unselected;
            model_input.capabilities.supports_parallel_tool_calls = ToggleState::Unselected;
            model_input.capabilities.supports_prompt_cache_key = ToggleState::Unselected;
            model_input.capabilities.supports_chat_completions = ToggleState::Unselected;

            let parsed_model = model_input.parse(cx).unwrap();
            assert!(!parsed_model.capabilities.tools);
            assert!(!parsed_model.capabilities.images);
            assert!(!parsed_model.capabilities.parallel_tool_calls);
            assert!(!parsed_model.capabilities.prompt_cache_key);
            assert!(!parsed_model.capabilities.chat_completions);
        });
    }

    // A mixed selection of capabilities must round-trip through `parse`.
    #[gpui::test]
    async fn test_model_input_with_name_and_capabilities(cx: &mut TestAppContext) {
        let cx = setup_test(cx).await;

        cx.update(|window, cx| {
            let mut model_input = ModelInput::new(0, window, cx);
            model_input.name.update(cx, |input, cx| {
                input.set_text("somemodel", window, cx);
            });

            model_input.capabilities.supports_tools = ToggleState::Selected;
            model_input.capabilities.supports_images = ToggleState::Unselected;
            model_input.capabilities.supports_parallel_tool_calls = ToggleState::Selected;
            model_input.capabilities.supports_prompt_cache_key = ToggleState::Unselected;
            model_input.capabilities.supports_chat_completions = ToggleState::Selected;

            let parsed_model = model_input.parse(cx).unwrap();
            assert_eq!(parsed_model.name, "somemodel");
            assert!(parsed_model.capabilities.tools);
            assert!(!parsed_model.capabilities.images);
            assert!(parsed_model.capabilities.parallel_tool_calls);
            assert!(!parsed_model.capabilities.prompt_cache_key);
            assert!(parsed_model.capabilities.chat_completions);
        });
    }

    // Sets up settings, theme, a fake filesystem, and a test workspace, and
    // returns the visual test context the tests drive.
    async fn setup_test(cx: &mut TestAppContext) -> &mut VisualTestContext {
        cx.update(|cx| {
            let store = SettingsStore::test(cx);
            cx.set_global(store);
            theme::init(theme::LoadThemes::JustBase, cx);

            language_model::init_settings(cx);
            editor::init(cx);
        });

        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| <dyn Fs>::set_global(fs.clone(), cx));
        let project = Project::test(fs, [path!("/dir").as_ref()], cx).await;
        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let _workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        cx
    }

    // Fills an `AddLlmProviderInput` from plain strings, runs the save, and
    // returns the validation error (if any). Each model tuple is
    // (name, max_tokens, max_completion_tokens, max_output_tokens).
    async fn save_provider_validation_errors(
        provider_name: &str,
        api_url: &str,
        api_key: &str,
        models: Vec<(&str, &str, &str, &str)>,
        cx: &mut VisualTestContext,
    ) -> Option<SharedString> {
        fn set_text(input: &Entity<InputField>, text: &str, window: &mut Window, cx: &mut App) {
            input.update(cx, |input, cx| {
                input.set_text(text, window, cx);
            });
        }

        let task = cx.update(|window, cx| {
            let mut input = AddLlmProviderInput::new(LlmCompatibleProvider::OpenAi, window, cx);
            set_text(&input.provider_name, provider_name, window, cx);
            set_text(&input.api_url, api_url, window, cx);
            set_text(&input.api_key, api_key, window, cx);

            for (i, (name, max_tokens, max_completion_tokens, max_output_tokens)) in
                models.iter().enumerate()
            {
                // The input starts with one empty row; add more as needed.
                if i >= input.models.len() {
                    input.models.push(ModelInput::new(i, window, cx));
                }
                let model = &mut input.models[i];
                set_text(&model.name, name, window, cx);
                set_text(&model.max_tokens, max_tokens, window, cx);
                set_text(
                    &model.max_completion_tokens,
                    max_completion_tokens,
                    window,
                    cx,
                );
                set_text(&model.max_output_tokens, max_output_tokens, window, cx);
            }
            save_provider_to_settings(&input, cx)
        });

        task.await.err()
    }
}