assets/icons/ai_vercel.svg
@@ -0,0 +1,3 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M8 1L15.5 14H0.5L8 1Z" fill="black"/>
+</svg>
dancer created
## Summary
- add a new `ai_gateway` language model provider in zed using the
openai-compatible chat completions path
- register ai gateway in provider wiring, settings schema, and default
settings
- add vercel ai gateway icon and provider list entry
- parse ai gateway model capabilities from tags (`tool-use`, `vision`)
and supported parameters
- clean up ai gateway oidc auth failures into a concise actionable error
message
Release Notes:
- Added Vercel AI Gateway as a new LLM provider in Zed.
assets/icons/ai_vercel.svg | 3
assets/settings/default.json | 3
crates/icons/src/icons.rs | 1
crates/language_models/src/language_models.rs | 8
crates/language_models/src/provider.rs | 1
crates/language_models/src/provider/vercel_ai_gateway.rs | 706 ++++++++++
crates/language_models/src/settings.rs | 8
crates/settings_content/src/agent.rs | 1
crates/settings_content/src/language_model.rs | 20
docs/src/ai/llm-providers.md | 27
10 files changed, 776 insertions(+), 2 deletions(-)
@@ -0,0 +1,3 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M8 1L15.5 14H0.5L8 1Z" fill="black"/>
+</svg>
@@ -2226,6 +2226,9 @@
"vercel": {
"api_url": "https://api.v0.dev/v1",
},
+ "vercel_ai_gateway": {
+ "api_url": "https://ai-gateway.vercel.sh/v1",
+ },
"x_ai": {
"api_url": "https://api.x.ai/v1",
},
@@ -23,6 +23,7 @@ pub enum IconName {
AiOpenAi,
AiOpenAiCompat,
AiOpenRouter,
+ AiVercel,
AiVZero,
AiXAi,
AiZed,
@@ -25,6 +25,7 @@ use crate::provider::open_ai::OpenAiLanguageModelProvider;
use crate::provider::open_ai_compatible::OpenAiCompatibleLanguageModelProvider;
use crate::provider::open_router::OpenRouterLanguageModelProvider;
use crate::provider::vercel::VercelLanguageModelProvider;
+use crate::provider::vercel_ai_gateway::VercelAiGatewayLanguageModelProvider;
use crate::provider::x_ai::XAiLanguageModelProvider;
pub use crate::settings::*;
@@ -208,6 +209,13 @@ fn register_language_model_providers(
Arc::new(VercelLanguageModelProvider::new(client.http_client(), cx)),
cx,
);
+ registry.register_provider(
+ Arc::new(VercelAiGatewayLanguageModelProvider::new(
+ client.http_client(),
+ cx,
+ )),
+ cx,
+ );
registry.register_provider(
Arc::new(XAiLanguageModelProvider::new(client.http_client(), cx)),
cx,
@@ -12,4 +12,5 @@ pub mod open_ai_compatible;
pub mod open_router;
mod util;
pub mod vercel;
+pub mod vercel_ai_gateway;
pub mod x_ai;
@@ -0,0 +1,706 @@
+use anyhow::Result;
+use collections::BTreeMap;
+use futures::{AsyncReadExt, FutureExt, StreamExt, future::BoxFuture};
+use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
+use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, http};
+use language_model::{
+ ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
+ LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProvider,
+ LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
+ LanguageModelRequest, LanguageModelToolChoice, LanguageModelToolSchemaFormat, RateLimiter,
+ env_var,
+};
+use open_ai::ResponseStreamEvent;
+use serde::Deserialize;
+pub use settings::OpenAiCompatibleModelCapabilities as ModelCapabilities;
+pub use settings::VercelAiGatewayAvailableModel as AvailableModel;
+use settings::{Settings, SettingsStore};
+use std::sync::{Arc, LazyLock};
+use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
+use ui_input::InputField;
+use util::ResultExt;
+
+// Stable identifier used in settings/telemetry, and display name shown in the UI.
+const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("vercel_ai_gateway");
+const PROVIDER_NAME: LanguageModelProviderName =
+    LanguageModelProviderName::new("Vercel AI Gateway");
+
+// Default gateway endpoint, used when settings leave `api_url` empty.
+const API_URL: &str = "https://ai-gateway.vercel.sh/v1";
+// Environment variable consulted as an alternative to a keychain-stored key.
+const API_KEY_ENV_VAR_NAME: &str = "VERCEL_AI_GATEWAY_API_KEY";
+static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
+
+/// User-configurable settings for the Vercel AI Gateway provider:
+/// a custom endpoint URL plus any manually declared models.
+#[derive(Default, Clone, Debug, PartialEq)]
+pub struct VercelAiGatewaySettings {
+    pub api_url: String,
+    pub available_models: Vec<AvailableModel>,
+}
+
+/// Language-model provider backed by Vercel AI Gateway's
+/// OpenAI-compatible chat completions API.
+pub struct VercelAiGatewayLanguageModelProvider {
+    http_client: Arc<dyn HttpClient>,
+    state: Entity<State>,
+}
+
+/// Shared provider state: API-key storage plus the model list
+/// fetched from the gateway's models endpoint.
+pub struct State {
+    api_key_state: ApiKeyState,
+    http_client: Arc<dyn HttpClient>,
+    available_models: Vec<AvailableModel>,
+    // Holds the in-flight model-list fetch so a restart replaces the old task.
+    fetch_models_task: Option<Task<Result<(), LanguageModelCompletionError>>>,
+}
+
+impl State {
+    /// True once an API key is available (from the keychain or the env var).
+    fn is_authenticated(&self) -> bool {
+        self.api_key_state.has_key()
+    }
+
+    /// Stores the key (or clears it when `None`) for the currently
+    /// configured endpoint.
+    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
+        let api_url = VercelAiGatewayLanguageModelProvider::api_url(cx);
+        self.api_key_state
+            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
+    }
+
+    /// Loads the key if needed, then restarts the model-list fetch.
+    /// The fetch restart runs whether or not loading succeeded; the
+    /// original authentication result is passed through unchanged.
+    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
+        let api_url = VercelAiGatewayLanguageModelProvider::api_url(cx);
+        let task = self
+            .api_key_state
+            .load_if_needed(api_url, |this| &mut this.api_key_state, cx);
+
+        cx.spawn(async move |this, cx| {
+            let result = task.await;
+            this.update(cx, |this, cx| this.restart_fetch_models_task(cx))
+                .ok();
+            result
+        })
+    }
+
+    /// Queries the gateway's model listing and replaces
+    /// `available_models` with the result, notifying observers.
+    fn fetch_models(
+        &mut self,
+        cx: &mut Context<Self>,
+    ) -> Task<Result<(), LanguageModelCompletionError>> {
+        let http_client = self.http_client.clone();
+        let api_url = VercelAiGatewayLanguageModelProvider::api_url(cx);
+        let api_key = self.api_key_state.key(&api_url);
+        cx.spawn(async move |this, cx| {
+            let models = list_models(http_client.as_ref(), &api_url, api_key.as_deref()).await?;
+            this.update(cx, |this, cx| {
+                this.available_models = models;
+                cx.notify();
+            })
+            .map_err(|e| LanguageModelCompletionError::Other(e))?;
+            Ok(())
+        })
+    }
+
+    /// Starts (or replaces) the model fetch when authenticated;
+    /// clears the cached model list otherwise.
+    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
+        if self.is_authenticated() {
+            let task = self.fetch_models(cx);
+            self.fetch_models_task.replace(task);
+        } else {
+            self.available_models = Vec::new();
+        }
+    }
+}
+
+impl VercelAiGatewayLanguageModelProvider {
+    /// Builds the provider and wires a settings observer that
+    /// re-authenticates whenever the gateway settings change.
+    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
+        let state = cx.new(|cx| {
+            cx.observe_global::<SettingsStore>({
+                let mut last_settings = VercelAiGatewayLanguageModelProvider::settings(cx).clone();
+                move |this: &mut State, cx| {
+                    let current_settings = VercelAiGatewayLanguageModelProvider::settings(cx);
+                    // Only react to real changes to avoid redundant auth churn.
+                    if current_settings != &last_settings {
+                        last_settings = current_settings.clone();
+                        this.authenticate(cx).detach();
+                        cx.notify();
+                    }
+                }
+            })
+            .detach();
+            State {
+                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
+                http_client: http_client.clone(),
+                available_models: Vec::new(),
+                fetch_models_task: None,
+            }
+        });
+
+        Self { http_client, state }
+    }
+
+    /// Reads this provider's section of the global language-model settings.
+    fn settings(cx: &App) -> &VercelAiGatewaySettings {
+        &crate::AllLanguageModelSettings::get_global(cx).vercel_ai_gateway
+    }
+
+    /// Effective endpoint: the configured URL, or `API_URL` when unset.
+    fn api_url(cx: &App) -> SharedString {
+        let api_url = &Self::settings(cx).api_url;
+        if api_url.is_empty() {
+            API_URL.into()
+        } else {
+            SharedString::new(api_url.as_str())
+        }
+    }
+
+    /// Built-in fallback model offered even before the remote list loads.
+    fn default_available_model() -> AvailableModel {
+        AvailableModel {
+            name: "openai/gpt-5.3-codex".to_string(),
+            display_name: Some("GPT 5.3 Codex".to_string()),
+            max_tokens: 400_000,
+            max_output_tokens: Some(128_000),
+            max_completion_tokens: None,
+            capabilities: ModelCapabilities::default(),
+        }
+    }
+
+    /// Wraps an `AvailableModel` in this provider's `LanguageModel` impl.
+    fn create_language_model(&self, model: AvailableModel) -> Arc<dyn LanguageModel> {
+        Arc::new(VercelAiGatewayLanguageModel {
+            id: LanguageModelId::from(model.name.clone()),
+            model,
+            state: self.state.clone(),
+            http_client: self.http_client.clone(),
+            request_limiter: RateLimiter::new(4),
+        })
+    }
+}
+
+impl LanguageModelProviderState for VercelAiGatewayLanguageModelProvider {
+    type ObservableEntity = State;
+
+    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
+        Some(self.state.clone())
+    }
+}
+
+impl LanguageModelProvider for VercelAiGatewayLanguageModelProvider {
+    fn id(&self) -> LanguageModelProviderId {
+        PROVIDER_ID
+    }
+
+    fn name(&self) -> LanguageModelProviderName {
+        PROVIDER_NAME
+    }
+
+    fn icon(&self) -> IconOrSvg {
+        IconOrSvg::Icon(IconName::AiVercel)
+    }
+
+    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
+        Some(self.create_language_model(Self::default_available_model()))
+    }
+
+    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
+        None
+    }
+
+    /// Merges models by name; later inserts overwrite earlier ones, so the
+    /// precedence is: built-in default < models fetched from the gateway
+    /// < models declared in user settings.
+    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
+        let mut models = BTreeMap::default();
+
+        let default_model = Self::default_available_model();
+        models.insert(default_model.name.clone(), default_model);
+
+        for model in self.state.read(cx).available_models.clone() {
+            models.insert(model.name.clone(), model);
+        }
+
+        for model in &Self::settings(cx).available_models {
+            models.insert(model.name.clone(), model.clone());
+        }
+
+        models
+            .into_values()
+            .map(|model| self.create_language_model(model))
+            .collect()
+    }
+
+    fn is_authenticated(&self, cx: &App) -> bool {
+        self.state.read(cx).is_authenticated()
+    }
+
+    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
+        self.state.update(cx, |state, cx| state.authenticate(cx))
+    }
+
+    fn configuration_view(
+        &self,
+        _target_agent: language_model::ConfigurationViewTargetAgent,
+        window: &mut Window,
+        cx: &mut App,
+    ) -> AnyView {
+        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
+            .into()
+    }
+
+    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
+        self.state
+            .update(cx, |state, cx| state.set_api_key(None, cx))
+    }
+}
+
+/// A single gateway model, bound to the shared provider state.
+pub struct VercelAiGatewayLanguageModel {
+    id: LanguageModelId,
+    model: AvailableModel,
+    state: Entity<State>,
+    http_client: Arc<dyn HttpClient>,
+    // Limits this model to 4 concurrent in-flight requests (see `new`).
+    request_limiter: RateLimiter,
+}
+
+impl VercelAiGatewayLanguageModel {
+    /// Starts a streaming chat completion against the gateway's
+    /// OpenAI-compatible endpoint; fails with `NoApiKey` when no key
+    /// is configured.
+    fn stream_open_ai(
+        &self,
+        request: open_ai::Request,
+        cx: &AsyncApp,
+    ) -> BoxFuture<
+        'static,
+        Result<
+            futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>,
+            LanguageModelCompletionError,
+        >,
+    > {
+        let http_client = self.http_client.clone();
+        // NOTE(review): `Entity::read_with` on an `AsyncApp` is typically
+        // fallible (returns `Result`) — confirm this direct tuple
+        // destructuring compiles, or handle the error case explicitly.
+        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
+            let api_url = VercelAiGatewayLanguageModelProvider::api_url(cx);
+            (state.api_key_state.key(&api_url), api_url)
+        });
+
+        let future = self.request_limiter.stream(async move {
+            let provider = PROVIDER_NAME;
+            let Some(api_key) = api_key else {
+                return Err(LanguageModelCompletionError::NoApiKey { provider });
+            };
+            let request = open_ai::stream_completion(
+                http_client.as_ref(),
+                provider.0.as_str(),
+                &api_url,
+                &api_key,
+                request,
+            );
+            let response = request.await.map_err(map_open_ai_error)?;
+            Ok(response)
+        });
+
+        async move { Ok(future.await?.boxed()) }.boxed()
+    }
+}
+
+/// Converts an OpenAI-client request error into a completion error,
+/// honoring a numeric `Retry-After` header on HTTP failures.
+fn map_open_ai_error(error: open_ai::RequestError) -> LanguageModelCompletionError {
+    match error {
+        open_ai::RequestError::HttpResponseError {
+            status_code,
+            body,
+            headers,
+            ..
+        } => {
+            // Retry-After is interpreted as whole seconds; non-numeric
+            // values (HTTP-date form) are ignored.
+            let retry_after = headers
+                .get(http::header::RETRY_AFTER)
+                .and_then(|value| value.to_str().ok()?.parse::<u64>().ok())
+                .map(std::time::Duration::from_secs);
+
+            LanguageModelCompletionError::from_http_status(
+                PROVIDER_NAME,
+                status_code,
+                extract_error_message(&body),
+                retry_after,
+            )
+        }
+        open_ai::RequestError::Other(error) => LanguageModelCompletionError::Other(error),
+    }
+}
+
+/// Pulls a human-readable message from a JSON error body, trying
+/// `error.message`, then a string-valued `error`, then a top-level
+/// `message`, and finally falling back to the raw body.
+fn extract_error_message(body: &str) -> String {
+    let json = match serde_json::from_str::<serde_json::Value>(body) {
+        Ok(json) => json,
+        Err(_) => return body.to_string(),
+    };
+
+    let message = json
+        .get("error")
+        .and_then(|value| {
+            value
+                .get("message")
+                .and_then(serde_json::Value::as_str)
+                .or_else(|| value.as_str())
+        })
+        .or_else(|| json.get("message").and_then(serde_json::Value::as_str))
+        .map(ToString::to_string)
+        .unwrap_or_else(|| body.to_string());
+
+    clean_error_message(&message)
+}
+
+/// Rewrites known authentication-failure messages (OIDC-token and
+/// invalid-key variants) into concise, actionable guidance; all other
+/// messages pass through unchanged.
+fn clean_error_message(message: &str) -> String {
+    let lower = message.to_lowercase();
+
+    if lower.contains("vercel_oidc_token") && lower.contains("oidc token") {
+        return "Authentication failed for Vercel AI Gateway. Use a Vercel AI Gateway key (vck_...).\nCreate or manage keys in Vercel AI Gateway console.\nIf this persists, regenerate the key and update it in Vercel AI Gateway provider settings in Zed.".to_string();
+    }
+
+    if lower.contains("invalid api key") || lower.contains("invalid_api_key") {
+        return "Authentication failed for Vercel AI Gateway. Check that your Vercel AI Gateway key starts with vck_ and is active.".to_string();
+    }
+
+    message.to_string()
+}
+
+/// Case-insensitive, whitespace-trimmed membership test over model tags.
+fn has_tag(tags: &[String], expected: &str) -> bool {
+    tags.iter()
+        .any(|tag| tag.trim().eq_ignore_ascii_case(expected))
+}
+
+impl LanguageModel for VercelAiGatewayLanguageModel {
+    fn id(&self) -> LanguageModelId {
+        self.id.clone()
+    }
+
+    /// Display name falls back to the raw model id when none is set.
+    fn name(&self) -> LanguageModelName {
+        LanguageModelName::from(
+            self.model
+                .display_name
+                .clone()
+                .unwrap_or_else(|| self.model.name.clone()),
+        )
+    }
+
+    fn provider_id(&self) -> LanguageModelProviderId {
+        PROVIDER_ID
+    }
+
+    fn provider_name(&self) -> LanguageModelProviderName {
+        PROVIDER_NAME
+    }
+
+    fn supports_tools(&self) -> bool {
+        self.model.capabilities.tools
+    }
+
+    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
+        LanguageModelToolSchemaFormat::JsonSchemaSubset
+    }
+
+    fn supports_images(&self) -> bool {
+        self.model.capabilities.images
+    }
+
+    /// `Auto`/`Any` require tool support; `None` is always accepted.
+    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
+        match choice {
+            LanguageModelToolChoice::Auto => self.model.capabilities.tools,
+            LanguageModelToolChoice::Any => self.model.capabilities.tools,
+            LanguageModelToolChoice::None => true,
+        }
+    }
+
+    fn supports_split_token_display(&self) -> bool {
+        true
+    }
+
+    fn telemetry_id(&self) -> String {
+        format!("vercel_ai_gateway/{}", self.model.name)
+    }
+
+    fn max_token_count(&self) -> u64 {
+        self.model.max_tokens
+    }
+
+    fn max_output_tokens(&self) -> Option<u64> {
+        self.model.max_output_tokens
+    }
+
+    /// Approximate token count via tiktoken: large-context models
+    /// (>= 100k) use the gpt-4o tokenizer, everything else gpt-4.
+    fn count_tokens(
+        &self,
+        request: LanguageModelRequest,
+        cx: &App,
+    ) -> BoxFuture<'static, Result<u64>> {
+        let max_token_count = self.max_token_count();
+        cx.background_spawn(async move {
+            let messages = crate::provider::open_ai::collect_tiktoken_messages(request);
+            let model = if max_token_count >= 100_000 {
+                "gpt-4o"
+            } else {
+                "gpt-4"
+            };
+            tiktoken_rs::num_tokens_from_messages(model, &messages).map(|tokens| tokens as u64)
+        })
+        .boxed()
+    }
+
+    /// Converts the request into OpenAI wire format, streams it through
+    /// `stream_open_ai`, and maps the raw events into completion events.
+    fn stream_completion(
+        &self,
+        request: LanguageModelRequest,
+        cx: &AsyncApp,
+    ) -> BoxFuture<
+        'static,
+        Result<
+            futures::stream::BoxStream<
+                'static,
+                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
+            >,
+            LanguageModelCompletionError,
+        >,
+    > {
+        let request = crate::provider::open_ai::into_open_ai(
+            request,
+            &self.model.name,
+            self.model.capabilities.parallel_tool_calls,
+            self.model.capabilities.prompt_cache_key,
+            self.max_output_tokens(),
+            None,
+        );
+        let completions = self.stream_open_ai(request, cx);
+        async move {
+            let mapper = crate::provider::open_ai::OpenAiEventMapper::new();
+            Ok(mapper.map_stream(completions.await?).boxed())
+        }
+        .boxed()
+    }
+}
+
+/// Shape of the gateway's `GET /models` response envelope.
+#[derive(Deserialize)]
+struct ModelsResponse {
+    data: Vec<ApiModel>,
+}
+
+/// A single model entry as reported by the gateway; optional fields
+/// default when the API omits them.
+#[derive(Deserialize)]
+struct ApiModel {
+    id: String,
+    name: Option<String>,
+    context_window: Option<u64>,
+    max_tokens: Option<u64>,
+    #[serde(default)]
+    r#type: Option<String>,
+    #[serde(default)]
+    supported_parameters: Vec<String>,
+    #[serde(default)]
+    tags: Vec<String>,
+    architecture: Option<ApiModelArchitecture>,
+}
+
+/// Architecture metadata; `input_modalities` is used to detect image input.
+#[derive(Deserialize)]
+struct ApiModelArchitecture {
+    #[serde(default)]
+    input_modalities: Vec<String>,
+}
+
+/// Fetches the gateway's model list and converts each language model
+/// into an `AvailableModel`, deriving capabilities from the reported
+/// `supported_parameters`, `tags`, and `architecture` fields.
+/// The Authorization header is attached only when a key is provided.
+async fn list_models(
+    client: &dyn HttpClient,
+    api_url: &str,
+    api_key: Option<&str>,
+) -> Result<Vec<AvailableModel>, LanguageModelCompletionError> {
+    let uri = format!("{api_url}/models?include_mappings=true");
+    let mut request_builder = HttpRequest::builder()
+        .method(Method::GET)
+        .uri(uri)
+        .header("Accept", "application/json");
+    if let Some(api_key) = api_key {
+        request_builder = request_builder.header("Authorization", format!("Bearer {}", api_key));
+    }
+    let request = request_builder
+        .body(AsyncBody::default())
+        .map_err(|error| LanguageModelCompletionError::BuildRequestBody {
+            provider: PROVIDER_NAME,
+            error,
+        })?;
+    let mut response =
+        client
+            .send(request)
+            .await
+            .map_err(|error| LanguageModelCompletionError::HttpSend {
+                provider: PROVIDER_NAME,
+                error,
+            })?;
+
+    let mut body = String::new();
+    response
+        .body_mut()
+        .read_to_string(&mut body)
+        .await
+        .map_err(|error| LanguageModelCompletionError::ApiReadResponseError {
+            provider: PROVIDER_NAME,
+            error,
+        })?;
+
+    if !response.status().is_success() {
+        return Err(LanguageModelCompletionError::from_http_status(
+            PROVIDER_NAME,
+            response.status(),
+            extract_error_message(&body),
+            None,
+        ));
+    }
+
+    let response: ModelsResponse = serde_json::from_str(&body).map_err(|error| {
+        LanguageModelCompletionError::DeserializeResponse {
+            provider: PROVIDER_NAME,
+            error,
+        }
+    })?;
+
+    let mut models = Vec::new();
+    for model in response.data {
+        // Skip non-language entries (e.g. embeddings); entries with no
+        // type are kept.
+        if let Some(model_type) = model.r#type.as_deref()
+            && model_type != "language"
+        {
+            continue;
+        }
+        let supports_tools = model
+            .supported_parameters
+            .iter()
+            .any(|parameter| parameter == "tools")
+            || has_tag(&model.tags, "tool-use")
+            || has_tag(&model.tags, "tools");
+        let supports_images = model.architecture.is_some_and(|architecture| {
+            architecture
+                .input_modalities
+                .iter()
+                .any(|modality| modality == "image")
+        }) || has_tag(&model.tags, "vision")
+            || has_tag(&model.tags, "image-input");
+        let parallel_tool_calls = model
+            .supported_parameters
+            .iter()
+            .any(|parameter| parameter == "parallel_tool_calls");
+        let prompt_cache_key = model
+            .supported_parameters
+            .iter()
+            .any(|parameter| parameter == "prompt_cache_key" || parameter == "cache_control");
+        models.push(AvailableModel {
+            name: model.id.clone(),
+            display_name: model.name.or(Some(model.id)),
+            // Context falls back to max_tokens, then 128k, when omitted.
+            max_tokens: model.context_window.or(model.max_tokens).unwrap_or(128_000),
+            max_output_tokens: model.max_tokens,
+            max_completion_tokens: None,
+            capabilities: ModelCapabilities {
+                tools: supports_tools,
+                images: supports_images,
+                parallel_tool_calls,
+                prompt_cache_key,
+                chat_completions: true,
+            },
+        });
+    }
+
+    Ok(models)
+}
+
+/// Settings UI for entering or resetting the Vercel AI Gateway API key.
+struct ConfigurationView {
+    api_key_editor: Entity<InputField>,
+    state: Entity<State>,
+    // Present while credentials are loading; cleared when the load finishes.
+    load_credentials_task: Option<Task<()>>,
+}
+
+impl ConfigurationView {
+    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
+        let api_key_editor =
+            cx.new(|cx| InputField::new(window, cx, "vck_000000000000000000000000000"));
+
+        cx.observe(&state, |_, _, cx| cx.notify()).detach();
+
+        let load_credentials_task = Some(cx.spawn_in(window, {
+            let state = state.clone();
+            async move |this, cx| {
+                // `Entity::update` is fallible from an async context (the
+                // entity may be dropped); log and skip rather than wrapping
+                // the result in `Some(...)`, which can never be `None`.
+                if let Some(task) = state
+                    .update(cx, |state, cx| state.authenticate(cx))
+                    .log_err()
+                {
+                    let _ = task.await;
+                }
+                this.update(cx, |this, cx| {
+                    this.load_credentials_task = None;
+                    cx.notify();
+                })
+                .log_err();
+            }
+        }));
+
+        Self {
+            api_key_editor,
+            state,
+            load_credentials_task,
+        }
+    }
+
+    /// Saves the key typed into the editor; empty input is ignored.
+    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
+        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
+        if api_key.is_empty() {
+            return;
+        }
+
+        self.api_key_editor
+            .update(cx, |editor, cx| editor.set_text("", window, cx));
+
+        let state = self.state.clone();
+        cx.spawn_in(window, async move |_, cx| {
+            // Propagate the fallible `update` before awaiting the inner task.
+            state
+                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
+                .await
+        })
+        .detach_and_log_err(cx);
+    }
+
+    /// Clears the stored key and the editor contents.
+    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
+        self.api_key_editor
+            .update(cx, |editor, cx| editor.set_text("", window, cx));
+
+        let state = self.state.clone();
+        cx.spawn_in(window, async move |_, cx| {
+            state
+                .update(cx, |state, cx| state.set_api_key(None, cx))?
+                .await
+        })
+        .detach_and_log_err(cx);
+    }
+
+    /// The key editor is shown only while unauthenticated.
+    fn should_render_editor(&self, cx: &Context<Self>) -> bool {
+        !self.state.read(cx).is_authenticated()
+    }
+}
+
+impl Render for ConfigurationView {
+    /// Three states: loading credentials, key-entry form, or the
+    /// "already configured" card (disabled when the key came from the
+    /// environment variable).
+    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
+        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
+        // Card label calls out env-var keys and non-default endpoints.
+        let configured_card_label = if env_var_set {
+            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
+        } else {
+            let api_url = VercelAiGatewayLanguageModelProvider::api_url(cx);
+            if api_url == API_URL {
+                "API key configured".to_string()
+            } else {
+                format!("API key configured for {}", api_url)
+            }
+        };
+
+        if self.load_credentials_task.is_some() {
+            div().child(Label::new("Loading credentials...")).into_any()
+        } else if self.should_render_editor(cx) {
+            v_flex()
+                .size_full()
+                .on_action(cx.listener(Self::save_api_key))
+                .child(Label::new(
+                    "To use Zed's agent with Vercel AI Gateway, you need to add an API key. Follow these steps:",
+                ))
+                .child(
+                    List::new()
+                        .child(
+                            ListBulletItem::new("")
+                                .child(Label::new("Create an API key in"))
+                                .child(ButtonLink::new(
+                                    "Vercel AI Gateway's console",
+                                    "https://vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai%2Fapi-keys&title=Go+to+AI+Gateway",
+                                )),
+                        )
+                        .child(ListBulletItem::new(
+                            "Paste your API key below and hit enter to start using the assistant",
+                        )),
+                )
+                .child(self.api_key_editor.clone())
+                .child(
+                    Label::new(format!(
+                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed.",
+                    ))
+                    .size(LabelSize::Small)
+                    .color(Color::Muted),
+                )
+                .into_any_element()
+        } else {
+            ConfiguredApiCard::new(configured_card_label)
+                .disabled(env_var_set)
+                .when(env_var_set, |this| {
+                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
+                })
+                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
+                .into_any_element()
+        }
+    }
+}
@@ -8,7 +8,7 @@ use crate::provider::{
deepseek::DeepSeekSettings, google::GoogleSettings, lmstudio::LmStudioSettings,
mistral::MistralSettings, ollama::OllamaSettings, open_ai::OpenAiSettings,
open_ai_compatible::OpenAiCompatibleSettings, open_router::OpenRouterSettings,
- vercel::VercelSettings, x_ai::XAiSettings,
+ vercel::VercelSettings, vercel_ai_gateway::VercelAiGatewaySettings, x_ai::XAiSettings,
};
#[derive(Debug, RegisterSetting)]
@@ -24,6 +24,7 @@ pub struct AllLanguageModelSettings {
pub openai: OpenAiSettings,
pub openai_compatible: HashMap<Arc<str>, OpenAiCompatibleSettings>,
pub vercel: VercelSettings,
+ pub vercel_ai_gateway: VercelAiGatewaySettings,
pub x_ai: XAiSettings,
pub zed_dot_dev: ZedDotDevSettings,
}
@@ -44,6 +45,7 @@ impl settings::Settings for AllLanguageModelSettings {
let openai = language_models.openai.unwrap();
let openai_compatible = language_models.openai_compatible.unwrap();
let vercel = language_models.vercel.unwrap();
+ let vercel_ai_gateway = language_models.vercel_ai_gateway.unwrap();
let x_ai = language_models.x_ai.unwrap();
let zed_dot_dev = language_models.zed_dot_dev.unwrap();
Self {
@@ -107,6 +109,10 @@ impl settings::Settings for AllLanguageModelSettings {
api_url: vercel.api_url.unwrap(),
available_models: vercel.available_models.unwrap_or_default(),
},
+ vercel_ai_gateway: VercelAiGatewaySettings {
+ api_url: vercel_ai_gateway.api_url.unwrap(),
+ available_models: vercel_ai_gateway.available_models.unwrap_or_default(),
+ },
x_ai: XAiSettings {
api_url: x_ai.api_url.unwrap(),
available_models: x_ai.available_models.unwrap_or_default(),
@@ -290,6 +290,7 @@ impl JsonSchema for LanguageModelProviderSetting {
"openai",
"openrouter",
"vercel",
+ "vercel_ai_gateway",
"x_ai",
"zed.dev"
]
@@ -20,6 +20,7 @@ pub struct AllLanguageModelSettingsContent {
pub openai: Option<OpenAiSettingsContent>,
pub openai_compatible: Option<HashMap<Arc<str>, OpenAiCompatibleSettingsContent>>,
pub vercel: Option<VercelSettingsContent>,
+ pub vercel_ai_gateway: Option<VercelAiGatewaySettingsContent>,
pub x_ai: Option<XAiSettingsContent>,
#[serde(rename = "zed.dev")]
pub zed_dot_dev: Option<ZedDotDevSettingsContent>,
@@ -301,6 +302,25 @@ pub struct VercelAvailableModel {
pub max_completion_tokens: Option<u64>,
}
+#[with_fallible_options]
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
+pub struct VercelAiGatewaySettingsContent {
+ pub api_url: Option<String>,
+ pub available_models: Option<Vec<VercelAiGatewayAvailableModel>>,
+}
+
+#[with_fallible_options]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
+pub struct VercelAiGatewayAvailableModel {
+ pub name: String,
+ pub display_name: Option<String>,
+ pub max_tokens: u64,
+ pub max_output_tokens: Option<u64>,
+ pub max_completion_tokens: Option<u64>,
+ #[serde(default)]
+ pub capabilities: OpenAiCompatibleModelCapabilities,
+}
+
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct GoogleSettingsContent {
@@ -1,6 +1,6 @@
---
title: LLM Providers - Use Your Own API Keys in Zed
-description: Bring your own API keys to Zed. Set up Anthropic, OpenAI, Google AI, Ollama, DeepSeek, Mistral, OpenRouter, and more.
+description: Bring your own API keys to Zed. Set up Anthropic, OpenAI, Google AI, Ollama, DeepSeek, Mistral, OpenRouter, Vercel AI Gateway, and more.
---
# LLM Providers
@@ -32,6 +32,7 @@ Zed supports these providers with your own API keys:
- [OpenAI](#openai)
- [OpenAI API Compatible](#openai-api-compatible)
- [OpenRouter](#openrouter)
+- [Vercel AI Gateway](#vercel-ai-gateway)
- [Vercel](#vercel-v0)
- [xAI](#xai)
@@ -738,6 +739,30 @@ Example adding routing preferences to a model:
These routing controls let you fine-tune cost, capability, and reliability trade-offs without changing the model name you select in the UI.
+### Vercel AI Gateway {#vercel-ai-gateway}
+
+[Vercel AI Gateway](https://vercel.com/ai-gateway) provides access to many models through a single OpenAI-compatible endpoint.
+
+1. Create an API key from your [Vercel AI Gateway keys page](https://vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai%2Fapi-keys&title=Go+to+AI+Gateway)
+2. Open the settings view (`agent: open settings`) and go to the **Vercel AI Gateway** section
+3. Enter your Vercel AI Gateway API key
+
+The Vercel AI Gateway API key will be saved in your keychain.
+
+Zed will also use the `VERCEL_AI_GATEWAY_API_KEY` environment variable if it's defined.
+
+You can also set a custom endpoint for Vercel AI Gateway in your settings file:
+
+```json [settings]
+{
+ "language_models": {
+ "vercel_ai_gateway": {
+ "api_url": "https://ai-gateway.vercel.sh/v1"
+ }
+ }
+}
+```
+
### Vercel v0 {#vercel-v0}
[Vercel v0](https://v0.app/docs/api/model) is a model for generating full-stack apps, with framework-aware completions for stacks like Next.js and Vercel.