use anyhow::{Context as _, Result, anyhow};
use collections::{BTreeMap, HashMap};
use credentials_provider::CredentialsProvider;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, Subscription, Task, Window};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    RateLimiter, Role, StopReason,
};
use menu;
use open_ai::{ImageUrl, ResponseStreamEvent, stream_completion};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use strum::IntoEnumIterator;
use vercel::Model;

use ui::{ElevationIndex, List, Tooltip, prelude::*};
use ui_input::SingleLineInput;
use util::ResultExt;

use crate::{AllLanguageModelSettings, ui::InstructionListItem};

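// Vercel v0 is exposed as an OpenAI-compatible provider: requests are built as
// `open_ai::Request`s and streamed through `open_ai::stream_completion` against
// the configured Vercel API URL.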
const PROVIDER_ID: &str = "vercel";
const PROVIDER_NAME: &str = "Vercel";

#[derive(Default, Clone, Debug, PartialEq)]
pub struct VercelSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
    pub needs_setting_migration: bool,
}

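/// A model entry users can declare in their settings to extend or override the
/// built-in model list (see `provided_models` below). A minimal sketch of
/// constructing one directly; the model name here is hypothetical:
///
/// ```ignore
/// AvailableModel {
///     name: "my-custom-model".into(),
///     display_name: Some("My Custom Model".into()),
///     max_tokens: 128_000,
///     max_output_tokens: Some(16_384),
///     max_completion_tokens: None,
/// }
/// ```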
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
}

pub struct VercelLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

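/// Authentication state shared by the provider, its models, and the
/// configuration view. The API key comes either from the `VERCEL_API_KEY`
/// environment variable or from the system credential store, keyed by the
/// configured API URL.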
pub struct State {
    api_key: Option<String>,
    api_key_from_env: bool,
    _subscription: Subscription,
}

const VERCEL_API_KEY_VAR: &str = "VERCEL_API_KEY";

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key.is_some()
    }

    fn reset_api_key(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let credentials_provider = <dyn CredentialsProvider>::global(cx);
        let settings = &AllLanguageModelSettings::get_global(cx).vercel;
        let api_url = if settings.api_url.is_empty() {
            vercel::VERCEL_API_URL.to_string()
        } else {
            settings.api_url.clone()
        };
        cx.spawn(async move |this, cx| {
            credentials_provider
                .delete_credentials(&api_url, &cx)
                .await
                .log_err();
            this.update(cx, |this, cx| {
                this.api_key = None;
                this.api_key_from_env = false;
                cx.notify();
            })
        })
    }

    fn set_api_key(&mut self, api_key: String, cx: &mut Context<Self>) -> Task<Result<()>> {
        let credentials_provider = <dyn CredentialsProvider>::global(cx);
        let settings = &AllLanguageModelSettings::get_global(cx).vercel;
        let api_url = if settings.api_url.is_empty() {
            vercel::VERCEL_API_URL.to_string()
        } else {
            settings.api_url.clone()
        };
        cx.spawn(async move |this, cx| {
            credentials_provider
                .write_credentials(&api_url, "Bearer", api_key.as_bytes(), &cx)
                .await
                .log_err();
            this.update(cx, |this, cx| {
                this.api_key = Some(api_key);
                cx.notify();
            })
        })
    }

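    /// Loads credentials, preferring the `VERCEL_API_KEY` environment variable
    /// over the credential store. Resolves immediately if a key is already set.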
    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let credentials_provider = <dyn CredentialsProvider>::global(cx);
        let settings = &AllLanguageModelSettings::get_global(cx).vercel;
        let api_url = if settings.api_url.is_empty() {
            vercel::VERCEL_API_URL.to_string()
        } else {
            settings.api_url.clone()
        };
        cx.spawn(async move |this, cx| {
            let (api_key, from_env) = if let Ok(api_key) = std::env::var(VERCEL_API_KEY_VAR) {
                (api_key, true)
            } else {
                let (_, api_key) = credentials_provider
                    .read_credentials(&api_url, &cx)
                    .await?
                    .ok_or(AuthenticateError::CredentialsNotFound)?;
                (
                    String::from_utf8(api_key)
                        .context(format!("invalid {PROVIDER_NAME} API key"))?,
                    false,
                )
            };
            this.update(cx, |this, cx| {
                this.api_key = Some(api_key);
                this.api_key_from_env = from_env;
                cx.notify();
            })?;

            Ok(())
        })
    }
}

impl VercelLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| State {
            api_key: None,
            api_key_from_env: false,
            _subscription: cx.observe_global::<SettingsStore>(|_this: &mut State, cx| {
                cx.notify();
            }),
        });

        Self { http_client, state }
    }

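    /// Wraps a `vercel::Model` in a `VercelLanguageModel`. The rate limiter
    /// allows at most four concurrent requests per model instance.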
    fn create_language_model(&self, model: vercel::Model) -> Arc<dyn LanguageModel> {
        Arc::new(VercelLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for VercelLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for VercelLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiVZero
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(vercel::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(vercel::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from vercel::Model::iter()
        for model in vercel::Model::iter() {
            if !matches!(model, vercel::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings
        for model in &AllLanguageModelSettings::get_global(cx)
            .vercel
            .available_models
        {
            models.insert(
                model.name.clone(),
                vercel::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.reset_api_key(cx))
    }
}

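/// A single Vercel model as seen by the agent: it holds the shared auth state,
/// an HTTP client, and its own request rate limiter.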
pub struct VercelLanguageModel {
    id: LanguageModelId,
    model: vercel::Model,
    state: gpui::Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl VercelLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();
        let Ok((api_key, api_url)) = cx.read_entity(&self.state, |state, cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).vercel;
            let api_url = if settings.api_url.is_empty() {
                vercel::VERCEL_API_URL.to_string()
            } else {
                settings.api_url.clone()
            };
            (state.api_key.clone(), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let api_key = api_key.context("Missing Vercel API Key")?;
            let request = stream_completion(http_client.as_ref(), &api_url, &api_key, request);
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for VercelLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        false
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("vercel/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_vercel_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        let request = into_vercel(request, &self.model, self.max_output_tokens());
        let completions = self.stream_completion(request, cx);
        async move {
            let mapper = VercelEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

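/// Converts an agent-level `LanguageModelRequest` into an OpenAI-compatible
/// `open_ai::Request` for Vercel: consecutive content parts of the same role
/// are merged into one message, tool calls are attached to the preceding
/// assistant message, and tool results become `Tool` messages.
///
/// A minimal sketch of a call site (not compiled as a doctest; the request is
/// constructed as in the test at the bottom of this file):
///
/// ```ignore
/// let request = LanguageModelRequest {
///     messages: vec![LanguageModelRequestMessage {
///         role: Role::User,
///         content: vec![MessageContent::Text("Hello".into())],
///         cache: false,
///     }],
///     // ...remaining fields as in `tiktoken_rs_support` below...
/// };
/// let openai_request = into_vercel(request, &vercel::Model::default(), Some(4096));
/// ```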
pub fn into_vercel(
    request: LanguageModelRequest,
    model: &vercel::Model,
    max_output_tokens: Option<u64>,
) -> open_ai::Request {
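    // Mirrors the OpenAI provider: o1-style model IDs historically rejected
    // streaming responses, so only stream for other models.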
    let stream = !model.id().starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    add_message_content_part(
                        open_ai::MessagePart::Text { text },
                        message.role,
                        &mut messages,
                    )
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model.id().into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.unwrap_or(1.0),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if model.supports_parallel_tool_calls() && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
    }
}

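/// Appends `new_part` to the last message when it has the same role (and, for
/// assistants, existing content); otherwise starts a new message. This is what
/// collapses consecutive parts of one role into a single request message.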
fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

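/// Translates the OpenAI-compatible stream events coming back from Vercel into
/// `LanguageModelCompletionEvent`s. Tool calls arrive as indexed fragments, so
/// they are accumulated in `tool_calls_by_index` and only emitted once the
/// stream reports a `tool_calls` finish reason.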
pub struct VercelEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl VercelEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::Other(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.first() else {
            return Vec::new();
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content.clone() {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(tool_calls) = choice.delta.tool_calls.as_ref() {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id.clone() {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function.as_ref() {
                    if let Some(name) = function.name.clone() {
                        entry.name = name;
                    }

                    if let Some(arguments) = function.arguments.clone() {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                            },
                        )),
                        Err(error) => Err(LanguageModelCompletionError::BadInputJson {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.as_str().into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected Vercel stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

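/// A tool call accumulated across stream deltas; `arguments` holds the raw
/// JSON string, concatenated fragment by fragment until the call completes.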
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

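/// Estimates token usage with `tiktoken_rs` on a background thread. Vercel
/// does not ship its own tokenizer here, so models are mapped to OpenAI
/// tokenizers that `tiktoken_rs` supports (`cl100k_base` via "gpt-4" or
/// `o200k_base` via "gpt-4o"); the result is an approximation.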
pub fn count_vercel_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = request
            .messages
            .into_iter()
            .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
                role: match message.role {
                    Role::User => "user".into(),
                    Role::Assistant => "assistant".into(),
                    Role::System => "system".into(),
                },
                content: Some(message.string_contents()),
                name: None,
                function_call: None,
            })
            .collect::<Vec<_>>();

        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // If the max tokens is 100k or more, it likely uses the o200k_base tokenizer from GPT-4o.
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported with this tiktoken method.
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Map Vercel models to appropriate OpenAI models for token counting,
            // since Vercel uses an OpenAI-compatible API.
            Model::VZero => {
                // Vercel v0 is similar to GPT-4o, so use gpt-4o for token counting.
                tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

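/// The settings-panel view for entering, saving, and resetting the Vercel API
/// key. While credentials are being loaded it shows a placeholder instead of
/// the editor.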
struct ConfigurationView {
    api_key_editor: Entity<SingleLineInput>,
    state: gpui::Entity<State>,
    load_credentials_task: Option<Task<()>>,
}

impl ConfigurationView {
    fn new(state: gpui::Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| {
            SingleLineInput::new(
                window,
                cx,
                "v1:0000000000000000000000000000000000000000000000000",
            )
            .label("API key")
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        let load_credentials_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    // We don't log an error, because "not signed in" is also an error.
                    let _ = task.await;
                }
                this.update(cx, |this, cx| {
                    this.load_credentials_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            api_key_editor,
            state,
            load_credentials_task,
        }
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self
            .api_key_editor
            .read(cx)
            .editor()
            .read(cx)
            .text(cx)
            .trim()
            .to_string();

        // Don't proceed if no API key is provided and we're not authenticated
        if api_key.is_empty() && !self.state.read(cx).is_authenticated() {
            return;
        }

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(api_key, cx))?
                .await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor.update(cx, |input, cx| {
            input.editor.update(cx, |editor, cx| {
                editor.set_text("", window, cx);
            });
        });

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state.update(cx, |state, cx| state.reset_api_key(cx))?.await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
        !self.state.read(cx).is_authenticated()
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let env_var_set = self.state.read(cx).api_key_from_env;

        let api_key_section = if self.should_render_editor(cx) {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(Label::new("To use Zed's agent with Vercel v0, you need to add an API key. Follow these steps:"))
                .child(
                    List::new()
                        .child(InstructionListItem::new(
                            "Create one by visiting",
                            Some("Vercel v0's console"),
                            Some("https://v0.dev/chat/settings/keys"),
                        ))
                        .child(InstructionListItem::text_only(
                            "Paste your API key below and hit enter to start using the agent",
                        )),
                )
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(format!(
                        "You can also assign the {VERCEL_API_KEY_VAR} environment variable and restart Zed."
                    ))
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .child(
                    Label::new("Note that Vercel v0 is a custom OpenAI-compatible provider.")
                        .size(LabelSize::Small)
                        .color(Color::Muted),
                )
                .into_any()
        } else {
            h_flex()
                .mt_1()
                .p_1()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().background)
                .child(
                    h_flex()
                        .gap_1()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(Label::new(if env_var_set {
                            format!("API key set in {VERCEL_API_KEY_VAR} environment variable.")
                        } else {
                            "API key configured.".to_string()
                        })),
                )
                .child(
                    Button::new("reset-api-key", "Reset API Key")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .when(env_var_set, |this| {
                            this.tooltip(Tooltip::text(format!("To reset your API key, unset the {VERCEL_API_KEY_VAR} environment variable.")))
                        })
                        .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx))),
                )
                .into_any()
        };

        if self.load_credentials_task.is_some() {
            div().child(Label::new("Loading credentials…")).into_any()
        } else {
            v_flex().size_full().child(api_key_section).into_any()
        }
    }
}

#[cfg(test)]
mod tests {
    use gpui::TestAppContext;
    use language_model::LanguageModelRequestMessage;

    use super::*;

    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
        };

        // Validate that all models are supported by tiktoken-rs
        for model in Model::iter() {
            let count = cx
                .executor()
                .block(count_vercel_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }
}