diff --git a/Cargo.lock b/Cargo.lock
index 434e74a46219a94296af742d7298889f03d7627f..a3ddf3f4960224f4ebf46c4850f7214d3fc493d1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9514,6 +9514,7 @@ dependencies = [
"ollama",
"open_ai",
"open_router",
+ "opencode",
"partial-json-fixer",
"pretty_assertions",
"release_channel",
@@ -11665,6 +11666,20 @@ dependencies = [
"thiserror 2.0.17",
]
+[[package]]
+name = "opencode"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "futures 0.3.31",
+ "google_ai",
+ "http_client",
+ "schemars",
+ "serde",
+ "serde_json",
+ "strum 0.27.2",
+]
+
[[package]]
name = "opener"
version = "0.7.2"
diff --git a/Cargo.toml b/Cargo.toml
index 5f736ef3e83625c89425985e179e973bff4ff67c..dd426748606407aad3fdce359bc4ba0abe64727d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -134,6 +134,7 @@ members = [
"crates/notifications",
"crates/ollama",
"crates/onboarding",
+ "crates/opencode",
"crates/open_ai",
"crates/open_path_prompt",
"crates/open_router",
@@ -381,6 +382,7 @@ node_runtime = { path = "crates/node_runtime" }
notifications = { path = "crates/notifications" }
ollama = { path = "crates/ollama" }
onboarding = { path = "crates/onboarding" }
+opencode = { path = "crates/opencode" }
open_ai = { path = "crates/open_ai" }
open_path_prompt = { path = "crates/open_path_prompt" }
open_router = { path = "crates/open_router", features = ["schemars"] }
diff --git a/assets/icons/ai_open_code.svg b/assets/icons/ai_open_code.svg
new file mode 100644
index 0000000000000000000000000000000000000000..304b155188c2286a4f8cab208872d0373d8099f1
--- /dev/null
+++ b/assets/icons/ai_open_code.svg
@@ -0,0 +1,3 @@
+
diff --git a/assets/settings/default.json b/assets/settings/default.json
index be1244bd14dc98005e5ba6ecaf5392af2fff9b24..97c74af5ad6b158a8658a944bdc0e5e16982e91f 100644
--- a/assets/settings/default.json
+++ b/assets/settings/default.json
@@ -2245,6 +2245,9 @@
"api_url": "https://api.openai.com/v1",
},
"openai_compatible": {},
+ "opencode": {
+ "api_url": "https://opencode.ai/zen",
+ },
"open_router": {
"api_url": "https://openrouter.ai/api/v1",
},
diff --git a/crates/icons/src/icons.rs b/crates/icons/src/icons.rs
index 3ca6b4f84d4f09fe2114d0bd86e1d30e6a30e1d1..1aefd7fde212ac068562c9a2fc6b612ca9b06330 100644
--- a/crates/icons/src/icons.rs
+++ b/crates/icons/src/icons.rs
@@ -22,6 +22,7 @@ pub enum IconName {
AiOllama,
AiOpenAi,
AiOpenAiCompat,
+ AiOpenCode,
AiOpenRouter,
AiVercel,
AiVZero,
diff --git a/crates/language_models/Cargo.toml b/crates/language_models/Cargo.toml
index 911dfb813ac54d89e764b3d62c50b4411cf8ba9c..4ebfce695e587265ea39077c67c84ce9b01e5352 100644
--- a/crates/language_models/Cargo.toml
+++ b/crates/language_models/Cargo.toml
@@ -47,6 +47,7 @@ menu.workspace = true
mistral = { workspace = true, features = ["schemars"] }
ollama = { workspace = true, features = ["schemars"] }
open_ai = { workspace = true, features = ["schemars"] }
+opencode = { workspace = true, features = ["schemars"] }
open_router = { workspace = true, features = ["schemars"] }
partial-json-fixer.workspace = true
release_channel.workspace = true
diff --git a/crates/language_models/src/language_models.rs b/crates/language_models/src/language_models.rs
index f22ea00c9e801e120bf057a06683487bc4deb22a..55624ed9d52d5dbb9cf8b724e0ea9ca2ef5a894a 100644
--- a/crates/language_models/src/language_models.rs
+++ b/crates/language_models/src/language_models.rs
@@ -24,6 +24,7 @@ use crate::provider::ollama::OllamaLanguageModelProvider;
use crate::provider::open_ai::OpenAiLanguageModelProvider;
use crate::provider::open_ai_compatible::OpenAiCompatibleLanguageModelProvider;
use crate::provider::open_router::OpenRouterLanguageModelProvider;
+use crate::provider::opencode::OpenCodeLanguageModelProvider;
use crate::provider::vercel::VercelLanguageModelProvider;
use crate::provider::vercel_ai_gateway::VercelAiGatewayLanguageModelProvider;
use crate::provider::x_ai::XAiLanguageModelProvider;
@@ -220,5 +221,9 @@ fn register_language_model_providers(
Arc::new(XAiLanguageModelProvider::new(client.http_client(), cx)),
cx,
);
+ registry.register_provider(
+ Arc::new(OpenCodeLanguageModelProvider::new(client.http_client(), cx)),
+ cx,
+ );
registry.register_provider(Arc::new(CopilotChatLanguageModelProvider::new(cx)), cx);
}
diff --git a/crates/language_models/src/provider.rs b/crates/language_models/src/provider.rs
index 27f43e37f5be343c3f80201c013e96d858bb00de..d3c433974599399160e602b8f201b9fd0af874cb 100644
--- a/crates/language_models/src/provider.rs
+++ b/crates/language_models/src/provider.rs
@@ -10,6 +10,7 @@ pub mod ollama;
pub mod open_ai;
pub mod open_ai_compatible;
pub mod open_router;
+pub mod opencode;
mod util;
pub mod vercel;
pub mod vercel_ai_gateway;
diff --git a/crates/language_models/src/provider/opencode.rs b/crates/language_models/src/provider/opencode.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f3953f3cafa4a1f59ff86004628c0a4022f6257e
--- /dev/null
+++ b/crates/language_models/src/provider/opencode.rs
@@ -0,0 +1,646 @@
+use anyhow::Result;
+use collections::BTreeMap;
+use futures::{FutureExt, StreamExt, future::BoxFuture};
+use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
+use http_client::HttpClient;
+use language_model::{
+ ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
+ LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProvider,
+ LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
+ LanguageModelRequest, LanguageModelToolChoice, RateLimiter, Role, env_var,
+};
+use opencode::{ApiProtocol, OPENCODE_API_URL};
+pub use settings::OpenCodeAvailableModel as AvailableModel;
+use settings::{Settings, SettingsStore};
+use std::sync::{Arc, LazyLock};
+use strum::IntoEnumIterator;
+use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
+use ui_input::InputField;
+use util::ResultExt;
+
+use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
+use crate::provider::google::{GoogleEventMapper, into_google};
+use crate::provider::open_ai::{
+ OpenAiEventMapper, OpenAiResponseEventMapper, into_open_ai, into_open_ai_response,
+};
+
+const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("opencode");
+const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("OpenCode Zen");
+
+const API_KEY_ENV_VAR_NAME: &str = "OPENCODE_API_KEY";
+static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
+
+#[derive(Default, Clone, Debug, PartialEq)]
+pub struct OpenCodeSettings {
+ pub api_url: String,
+    pub available_models: Vec<AvailableModel>,
+}
+
+pub struct OpenCodeLanguageModelProvider {
+    http_client: Arc<dyn HttpClient>,
+    state: Entity<State>,
+}
+
+pub struct State {
+ api_key_state: ApiKeyState,
+}
+
+impl State {
+ fn is_authenticated(&self) -> bool {
+ self.api_key_state.has_key()
+ }
+
+    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
+ let api_url = OpenCodeLanguageModelProvider::api_url(cx);
+ self.api_key_state
+ .store(api_url, api_key, |this| &mut this.api_key_state, cx)
+ }
+
+    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
+ let api_url = OpenCodeLanguageModelProvider::api_url(cx);
+ self.api_key_state
+ .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
+ }
+}
+
+impl OpenCodeLanguageModelProvider {
+    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
+ let state = cx.new(|cx| {
+            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
+ let api_url = Self::api_url(cx);
+ this.api_key_state
+ .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
+ cx.notify();
+ })
+ .detach();
+ State {
+ api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
+ }
+ });
+
+ Self { http_client, state }
+ }
+
+    fn create_language_model(&self, model: opencode::Model) -> Arc<dyn LanguageModel> {
+ Arc::new(OpenCodeLanguageModel {
+ id: LanguageModelId::from(model.id().to_string()),
+ model,
+ state: self.state.clone(),
+ http_client: self.http_client.clone(),
+ request_limiter: RateLimiter::new(4),
+ })
+ }
+
+ pub fn settings(cx: &App) -> &OpenCodeSettings {
+ &crate::AllLanguageModelSettings::get_global(cx).opencode
+ }
+
+ fn api_url(cx: &App) -> SharedString {
+ let api_url = &Self::settings(cx).api_url;
+ if api_url.is_empty() {
+ OPENCODE_API_URL.into()
+ } else {
+ SharedString::new(api_url.as_str())
+ }
+ }
+}
+
+impl LanguageModelProviderState for OpenCodeLanguageModelProvider {
+ type ObservableEntity = State;
+
+    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
+ Some(self.state.clone())
+ }
+}
+
+impl LanguageModelProvider for OpenCodeLanguageModelProvider {
+ fn id(&self) -> LanguageModelProviderId {
+ PROVIDER_ID
+ }
+
+ fn name(&self) -> LanguageModelProviderName {
+ PROVIDER_NAME
+ }
+
+ fn icon(&self) -> IconOrSvg {
+ IconOrSvg::Icon(IconName::AiOpenCode)
+ }
+
+    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
+ Some(self.create_language_model(opencode::Model::default()))
+ }
+
+    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
+ Some(self.create_language_model(opencode::Model::default_fast()))
+ }
+
+    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
+ let mut models = BTreeMap::default();
+
+ for model in opencode::Model::iter() {
+ if !matches!(model, opencode::Model::Custom { .. }) {
+ models.insert(model.id().to_string(), model);
+ }
+ }
+
+ for model in &Self::settings(cx).available_models {
+ let protocol = match model.protocol.as_str() {
+ "anthropic" => ApiProtocol::Anthropic,
+ "openai_responses" => ApiProtocol::OpenAiResponses,
+ "openai_chat" => ApiProtocol::OpenAiChat,
+ "google" => ApiProtocol::Google,
+ _ => ApiProtocol::OpenAiChat, // default fallback
+ };
+ models.insert(
+ model.name.clone(),
+ opencode::Model::Custom {
+ name: model.name.clone(),
+ display_name: model.display_name.clone(),
+ max_tokens: model.max_tokens,
+ max_output_tokens: model.max_output_tokens,
+ protocol,
+ },
+ );
+ }
+
+ models
+ .into_values()
+ .map(|model| self.create_language_model(model))
+ .collect()
+ }
+
+ fn is_authenticated(&self, cx: &App) -> bool {
+ self.state.read(cx).is_authenticated()
+ }
+
+    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
+ self.state.update(cx, |state, cx| state.authenticate(cx))
+ }
+
+ fn configuration_view(
+ &self,
+ _target_agent: language_model::ConfigurationViewTargetAgent,
+ window: &mut Window,
+ cx: &mut App,
+ ) -> AnyView {
+ cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
+ .into()
+ }
+
+    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
+ self.state
+ .update(cx, |state, cx| state.set_api_key(None, cx))
+ }
+}
+
+pub struct OpenCodeLanguageModel {
+ id: LanguageModelId,
+ model: opencode::Model,
+    state: Entity<State>,
+    http_client: Arc<dyn HttpClient>,
+ request_limiter: RateLimiter,
+}
+
+impl OpenCodeLanguageModel {
+ /// Returns the base API URL (e.g., "https://opencode.ai/zen").
+ fn base_api_url(&self, cx: &AsyncApp) -> SharedString {
+ self.state
+ .read_with(cx, |_, cx| OpenCodeLanguageModelProvider::api_url(cx))
+ }
+
+    fn api_key(&self, cx: &AsyncApp) -> Option<Arc<str>> {
+ self.state.read_with(cx, |state, cx| {
+ let api_url = OpenCodeLanguageModelProvider::api_url(cx);
+ state.api_key_state.key(&api_url)
+ })
+ }
+
+ fn stream_anthropic(
+ &self,
+ request: anthropic::Request,
+ cx: &AsyncApp,
+ ) -> BoxFuture<
+ 'static,
+ Result<
+ futures::stream::BoxStream<
+ 'static,
+                Result<anthropic::Event, anthropic::AnthropicError>,
+ >,
+ LanguageModelCompletionError,
+ >,
+ > {
+ let http_client = self.http_client.clone();
+ // Anthropic crate appends /v1/messages to api_url
+ let api_url = self.base_api_url(cx);
+ let api_key = self.api_key(cx);
+
+ let future = self.request_limiter.stream(async move {
+ let Some(api_key) = api_key else {
+ return Err(LanguageModelCompletionError::NoApiKey {
+ provider: PROVIDER_NAME,
+ });
+ };
+ let request = anthropic::stream_completion(
+ http_client.as_ref(),
+ &api_url,
+ &api_key,
+ request,
+ None,
+ );
+ let response = request.await?;
+ Ok(response)
+ });
+
+ async move { Ok(future.await?.boxed()) }.boxed()
+ }
+
+ fn stream_openai_chat(
+ &self,
+ request: open_ai::Request,
+ cx: &AsyncApp,
+ ) -> BoxFuture<
+ 'static,
+        Result<futures::stream::BoxStream<'static, Result<open_ai::ResponseStreamEvent>>, LanguageModelCompletionError>,
+ > {
+ let http_client = self.http_client.clone();
+ // OpenAI crate appends /chat/completions to api_url, so we pass base + "/v1"
+ let base_url = self.base_api_url(cx);
+ let api_url: SharedString = format!("{base_url}/v1").into();
+ let api_key = self.api_key(cx);
+ let provider_name = PROVIDER_NAME.0.to_string();
+
+ let future = self.request_limiter.stream(async move {
+ let Some(api_key) = api_key else {
+ return Err(LanguageModelCompletionError::NoApiKey {
+ provider: PROVIDER_NAME,
+ });
+ };
+ let request = open_ai::stream_completion(
+ http_client.as_ref(),
+ &provider_name,
+ &api_url,
+ &api_key,
+ request,
+ );
+ let response = request.await?;
+ Ok(response)
+ });
+
+ async move { Ok(future.await?.boxed()) }.boxed()
+ }
+
+ fn stream_openai_response(
+ &self,
+ request: open_ai::responses::Request,
+ cx: &AsyncApp,
+ ) -> BoxFuture<
+ 'static,
+        Result<futures::stream::BoxStream<'static, Result<open_ai::responses::ResponseStreamEvent>>, LanguageModelCompletionError>,
+ > {
+ let http_client = self.http_client.clone();
+ // Responses crate appends /responses to api_url, so we pass base + "/v1"
+ let base_url = self.base_api_url(cx);
+ let api_url: SharedString = format!("{base_url}/v1").into();
+ let api_key = self.api_key(cx);
+ let provider_name = PROVIDER_NAME.0.to_string();
+
+ let future = self.request_limiter.stream(async move {
+ let Some(api_key) = api_key else {
+ return Err(LanguageModelCompletionError::NoApiKey {
+ provider: PROVIDER_NAME,
+ });
+ };
+ let request = open_ai::responses::stream_response(
+ http_client.as_ref(),
+ &provider_name,
+ &api_url,
+ &api_key,
+ request,
+ );
+ let response = request.await?;
+ Ok(response)
+ });
+
+ async move { Ok(future.await?.boxed()) }.boxed()
+ }
+
+ fn stream_google_zen(
+ &self,
+ request: google_ai::GenerateContentRequest,
+ cx: &AsyncApp,
+ ) -> BoxFuture<
+ 'static,
+        Result<futures::stream::BoxStream<'static, Result<google_ai::GenerateContentResponse>>, LanguageModelCompletionError>,
+ > {
+ let http_client = self.http_client.clone();
+ let api_url = self.base_api_url(cx);
+ let api_key = self.api_key(cx);
+
+ let future = self.request_limiter.stream(async move {
+ let Some(api_key) = api_key else {
+ return Err(LanguageModelCompletionError::NoApiKey {
+ provider: PROVIDER_NAME,
+ });
+ };
+ let request = opencode::stream_generate_content_zen(
+ http_client.as_ref(),
+ &api_url,
+ &api_key,
+ request,
+ );
+ let response = request.await?;
+ Ok(response)
+ });
+
+ async move { Ok(future.await?.boxed()) }.boxed()
+ }
+}
+
+impl LanguageModel for OpenCodeLanguageModel {
+ fn id(&self) -> LanguageModelId {
+ self.id.clone()
+ }
+
+ fn name(&self) -> LanguageModelName {
+ LanguageModelName::from(self.model.display_name().to_string())
+ }
+
+ fn provider_id(&self) -> LanguageModelProviderId {
+ PROVIDER_ID
+ }
+
+ fn provider_name(&self) -> LanguageModelProviderName {
+ PROVIDER_NAME
+ }
+
+ fn supports_tools(&self) -> bool {
+ self.model.supports_tools()
+ }
+
+ fn supports_images(&self) -> bool {
+ self.model.supports_images()
+ }
+
+ fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
+ match choice {
+ LanguageModelToolChoice::Auto | LanguageModelToolChoice::Any => true,
+ LanguageModelToolChoice::None => {
+ // Google models don't support None tool choice
+ self.model.protocol() != ApiProtocol::Google
+ }
+ }
+ }
+
+ fn telemetry_id(&self) -> String {
+ format!("opencode/{}", self.model.id())
+ }
+
+ fn max_token_count(&self) -> u64 {
+ self.model.max_token_count()
+ }
+
+    fn max_output_tokens(&self) -> Option<u64> {
+ self.model.max_output_tokens()
+ }
+
+ fn count_tokens(
+ &self,
+ request: LanguageModelRequest,
+ cx: &App,
+    ) -> BoxFuture<'static, Result<u64>> {
+ cx.background_spawn(async move {
+ let messages = request
+ .messages
+ .into_iter()
+ .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
+ role: match message.role {
+ Role::User => "user".into(),
+ Role::Assistant => "assistant".into(),
+ Role::System => "system".into(),
+ },
+ content: Some(message.string_contents()),
+ name: None,
+ function_call: None,
+ })
+                .collect::<Vec<_>>();
+
+ tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages).map(|tokens| tokens as u64)
+ })
+ .boxed()
+ }
+
+ fn stream_completion(
+ &self,
+ request: LanguageModelRequest,
+ cx: &AsyncApp,
+ ) -> BoxFuture<
+ 'static,
+ Result<
+ futures::stream::BoxStream<
+ 'static,
+                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
+ >,
+ LanguageModelCompletionError,
+ >,
+ > {
+ match self.model.protocol() {
+ ApiProtocol::Anthropic => {
+ let anthropic_request = into_anthropic(
+ request,
+ self.model.id().to_string(),
+ 1.0,
+ self.model.max_output_tokens().unwrap_or(8192),
+ anthropic::AnthropicModelMode::Default,
+ );
+ let stream = self.stream_anthropic(anthropic_request, cx);
+ async move {
+ let mapper = AnthropicEventMapper::new();
+ Ok(mapper.map_stream(stream.await?).boxed())
+ }
+ .boxed()
+ }
+ ApiProtocol::OpenAiChat => {
+ let openai_request = into_open_ai(
+ request,
+ self.model.id(),
+ false,
+ false,
+ self.model.max_output_tokens(),
+ None,
+ );
+ let stream = self.stream_openai_chat(openai_request, cx);
+ async move {
+ let mapper = OpenAiEventMapper::new();
+ Ok(mapper.map_stream(stream.await?).boxed())
+ }
+ .boxed()
+ }
+ ApiProtocol::OpenAiResponses => {
+ let response_request = into_open_ai_response(
+ request,
+ self.model.id(),
+ false,
+ false,
+ self.model.max_output_tokens(),
+ None,
+ );
+ let stream = self.stream_openai_response(response_request, cx);
+ async move {
+ let mapper = OpenAiResponseEventMapper::new();
+ Ok(mapper.map_stream(stream.await?).boxed())
+ }
+ .boxed()
+ }
+ ApiProtocol::Google => {
+ let google_request = into_google(
+ request,
+ self.model.id().to_string(),
+ google_ai::GoogleModelMode::Default,
+ );
+ let stream = self.stream_google_zen(google_request, cx);
+ async move {
+ let mapper = GoogleEventMapper::new();
+ Ok(mapper.map_stream(stream.await?.boxed()).boxed())
+ }
+ .boxed()
+ }
+ }
+ }
+}
+
+struct ConfigurationView {
+    api_key_editor: Entity<InputField>,
+    state: Entity<State>,
+    load_credentials_task: Option<Task<()>>,
+}
+
+impl ConfigurationView {
+    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
+ let api_key_editor = cx.new(|cx| {
+ InputField::new(window, cx, "sk-00000000000000000000000000000000").label("API key")
+ });
+
+ cx.observe(&state, |_, _, cx| {
+ cx.notify();
+ })
+ .detach();
+
+ let load_credentials_task = Some(cx.spawn_in(window, {
+ let state = state.clone();
+ async move |this, cx| {
+ if let Some(task) = Some(state.update(cx, |state, cx| state.authenticate(cx))) {
+ let _ = task.await;
+ }
+ this.update(cx, |this, cx| {
+ this.load_credentials_task = None;
+ cx.notify();
+ })
+ .log_err();
+ }
+ }));
+
+ Self {
+ api_key_editor,
+ state,
+ load_credentials_task,
+ }
+ }
+
+    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
+ let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
+ if api_key.is_empty() {
+ return;
+ }
+
+ self.api_key_editor
+ .update(cx, |editor, cx| editor.set_text("", window, cx));
+
+ let state = self.state.clone();
+ cx.spawn_in(window, async move |_, cx| {
+ state
+ .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
+ .await
+ })
+ .detach_and_log_err(cx);
+ }
+
+    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
+ self.api_key_editor
+ .update(cx, |editor, cx| editor.set_text("", window, cx));
+
+ let state = self.state.clone();
+ cx.spawn_in(window, async move |_, cx| {
+ state
+ .update(cx, |state, cx| state.set_api_key(None, cx))
+ .await
+ })
+ .detach_and_log_err(cx);
+ }
+
+    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
+ !self.state.read(cx).is_authenticated()
+ }
+}
+
+impl Render for ConfigurationView {
+    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
+ let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
+ let configured_card_label = if env_var_set {
+ format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
+ } else {
+ let api_url = OpenCodeLanguageModelProvider::api_url(cx);
+ if api_url == OPENCODE_API_URL {
+ "API key configured".to_string()
+ } else {
+ format!("API key configured for {}", api_url)
+ }
+ };
+
+ let api_key_section = if self.should_render_editor(cx) {
+ v_flex()
+ .on_action(cx.listener(Self::save_api_key))
+ .child(Label::new(
+ "To use OpenCode Zen models in Zed, you need an API key:",
+ ))
+ .child(
+ List::new()
+ .child(
+ ListBulletItem::new("")
+ .child(Label::new("Sign in and get your key at"))
+ .child(ButtonLink::new(
+ "OpenCode Zen Console",
+ "https://opencode.ai/zen",
+ )),
+ )
+ .child(ListBulletItem::new(
+ "Paste your API key below and hit enter to start using OpenCode Zen",
+ )),
+ )
+ .child(self.api_key_editor.clone())
+ .child(
+ Label::new(format!(
+ "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
+ ))
+ .size(LabelSize::Small)
+ .color(Color::Muted),
+ )
+ .into_any_element()
+ } else {
+ ConfiguredApiCard::new(configured_card_label)
+ .disabled(env_var_set)
+ .when(env_var_set, |this| {
+ this.tooltip_label(format!(
+ "To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."
+ ))
+ })
+ .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
+ .into_any_element()
+ };
+
+ if self.load_credentials_task.is_some() {
+ div().child(Label::new("Loading credentials...")).into_any()
+ } else {
+ v_flex().size_full().child(api_key_section).into_any()
+ }
+ }
+}
diff --git a/crates/language_models/src/settings.rs b/crates/language_models/src/settings.rs
index 7466a337f636abcd8ad70343dfd64a825a7fb6a7..f60d4c6cb498519133098f6306746c5a59e7a1d9 100644
--- a/crates/language_models/src/settings.rs
+++ b/crates/language_models/src/settings.rs
@@ -8,7 +8,8 @@ use crate::provider::{
deepseek::DeepSeekSettings, google::GoogleSettings, lmstudio::LmStudioSettings,
mistral::MistralSettings, ollama::OllamaSettings, open_ai::OpenAiSettings,
open_ai_compatible::OpenAiCompatibleSettings, open_router::OpenRouterSettings,
- vercel::VercelSettings, vercel_ai_gateway::VercelAiGatewaySettings, x_ai::XAiSettings,
+ opencode::OpenCodeSettings, vercel::VercelSettings, vercel_ai_gateway::VercelAiGatewaySettings,
+ x_ai::XAiSettings,
};
#[derive(Debug, RegisterSetting)]
@@ -20,6 +21,7 @@ pub struct AllLanguageModelSettings {
pub lmstudio: LmStudioSettings,
pub mistral: MistralSettings,
pub ollama: OllamaSettings,
+ pub opencode: OpenCodeSettings,
pub open_router: OpenRouterSettings,
pub openai: OpenAiSettings,
pub openai_compatible: HashMap, OpenAiCompatibleSettings>,
@@ -41,6 +43,7 @@ impl settings::Settings for AllLanguageModelSettings {
let lmstudio = language_models.lmstudio.unwrap();
let mistral = language_models.mistral.unwrap();
let ollama = language_models.ollama.unwrap();
+ let opencode = language_models.opencode.unwrap();
let open_router = language_models.open_router.unwrap();
let openai = language_models.openai.unwrap();
let openai_compatible = language_models.openai_compatible.unwrap();
@@ -85,6 +88,10 @@ impl settings::Settings for AllLanguageModelSettings {
available_models: ollama.available_models.unwrap_or_default(),
context_window: ollama.context_window,
},
+ opencode: OpenCodeSettings {
+ api_url: opencode.api_url.unwrap(),
+ available_models: opencode.available_models.unwrap_or_default(),
+ },
open_router: OpenRouterSettings {
api_url: open_router.api_url.unwrap(),
available_models: open_router.available_models.unwrap_or_default(),
diff --git a/crates/opencode/Cargo.toml b/crates/opencode/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..758d2f2479b9f8be2a2ed53d08e40a5cf510f286
--- /dev/null
+++ b/crates/opencode/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "opencode"
+version = "0.1.0"
+edition.workspace = true
+publish.workspace = true
+license = "GPL-3.0-or-later"
+
+[lints]
+workspace = true
+
+[lib]
+path = "src/opencode.rs"
+test = false
+
+[features]
+default = []
+schemars = ["dep:schemars"]
+
+[dependencies]
+anyhow.workspace = true
+futures.workspace = true
+google_ai.workspace = true
+http_client.workspace = true
+schemars = { workspace = true, optional = true }
+serde.workspace = true
+serde_json.workspace = true
+strum.workspace = true
diff --git a/crates/opencode/LICENSE-GPL b/crates/opencode/LICENSE-GPL
new file mode 120000
index 0000000000000000000000000000000000000000..89e542f750cd3860a0598eff0dc34b56d7336dc4
--- /dev/null
+++ b/crates/opencode/LICENSE-GPL
@@ -0,0 +1 @@
+../../LICENSE-GPL
\ No newline at end of file
diff --git a/crates/opencode/src/opencode.rs b/crates/opencode/src/opencode.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a44ea7edebe660cbf27dd2d6517fa08b358859d8
--- /dev/null
+++ b/crates/opencode/src/opencode.rs
@@ -0,0 +1,453 @@
+use anyhow::{Result, anyhow};
+use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
+use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
+use serde::{Deserialize, Serialize};
+use strum::EnumIter;
+
+pub const OPENCODE_API_URL: &str = "https://opencode.ai/zen";
+
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+#[serde(rename_all = "snake_case")]
+pub enum ApiProtocol {
+ #[default]
+ Anthropic,
+ OpenAiResponses,
+ OpenAiChat,
+ Google,
+}
+
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, EnumIter)]
+pub enum Model {
+ // -- Anthropic protocol models --
+ #[serde(rename = "claude-opus-4-6")]
+ ClaudeOpus4_6,
+ #[serde(rename = "claude-opus-4-5")]
+ ClaudeOpus4_5,
+ #[serde(rename = "claude-opus-4-1")]
+ ClaudeOpus4_1,
+ #[default]
+ #[serde(rename = "claude-sonnet-4-6")]
+ ClaudeSonnet4_6,
+ #[serde(rename = "claude-sonnet-4-5")]
+ ClaudeSonnet4_5,
+ #[serde(rename = "claude-sonnet-4")]
+ ClaudeSonnet4,
+ #[serde(rename = "claude-haiku-4-5")]
+ ClaudeHaiku4_5,
+ #[serde(rename = "claude-3-5-haiku")]
+ Claude3_5Haiku,
+
+ // -- OpenAI Responses API models --
+ #[serde(rename = "gpt-5.4")]
+ Gpt5_4,
+ #[serde(rename = "gpt-5.4-pro")]
+ Gpt5_4Pro,
+ #[serde(rename = "gpt-5.4-mini")]
+ Gpt5_4Mini,
+ #[serde(rename = "gpt-5.4-nano")]
+ Gpt5_4Nano,
+ #[serde(rename = "gpt-5.3-codex")]
+ Gpt5_3Codex,
+ #[serde(rename = "gpt-5.3-codex-spark")]
+ Gpt5_3Spark,
+ #[serde(rename = "gpt-5.2")]
+ Gpt5_2,
+ #[serde(rename = "gpt-5.2-codex")]
+ Gpt5_2Codex,
+ #[serde(rename = "gpt-5.1")]
+ Gpt5_1,
+ #[serde(rename = "gpt-5.1-codex")]
+ Gpt5_1Codex,
+ #[serde(rename = "gpt-5.1-codex-max")]
+ Gpt5_1CodexMax,
+ #[serde(rename = "gpt-5.1-codex-mini")]
+ Gpt5_1CodexMini,
+ #[serde(rename = "gpt-5")]
+ Gpt5,
+ #[serde(rename = "gpt-5-codex")]
+ Gpt5Codex,
+ #[serde(rename = "gpt-5-nano")]
+ Gpt5Nano,
+
+ // -- Google protocol models --
+ #[serde(rename = "gemini-3.1-pro")]
+ Gemini3_1Pro,
+ #[serde(rename = "gemini-3-flash")]
+ Gemini3Flash,
+
+ // -- OpenAI Chat Completions protocol models --
+ #[serde(rename = "minimax-m2.5")]
+ MiniMaxM2_5,
+ #[serde(rename = "minimax-m2.5-free")]
+ MiniMaxM2_5Free,
+ #[serde(rename = "glm-5")]
+ Glm5,
+ #[serde(rename = "kimi-k2.5")]
+ KimiK2_5,
+ #[serde(rename = "mimo-v2-pro-free")]
+ MimoV2ProFree,
+ #[serde(rename = "mimo-v2-omni-free")]
+ MimoV2OmniFree,
+ #[serde(rename = "mimo-v2-flash-free")]
+ MimoV2FlashFree,
+ #[serde(rename = "trinity-large-preview-free")]
+ TrinityLargePreviewFree,
+ #[serde(rename = "big-pickle")]
+ BigPickle,
+ #[serde(rename = "nemotron-3-super-free")]
+ Nemotron3SuperFree,
+
+ // -- Custom model --
+ #[serde(rename = "custom")]
+ Custom {
+ name: String,
+        display_name: Option<String>,
+ max_tokens: u64,
+        max_output_tokens: Option<u64>,
+ protocol: ApiProtocol,
+ },
+}
+
+impl Model {
+ pub fn default_fast() -> Self {
+ Self::ClaudeHaiku4_5
+ }
+
+ pub fn id(&self) -> &str {
+ match self {
+ Self::ClaudeOpus4_6 => "claude-opus-4-6",
+ Self::ClaudeOpus4_5 => "claude-opus-4-5",
+ Self::ClaudeOpus4_1 => "claude-opus-4-1",
+ Self::ClaudeSonnet4_6 => "claude-sonnet-4-6",
+ Self::ClaudeSonnet4_5 => "claude-sonnet-4-5",
+ Self::ClaudeSonnet4 => "claude-sonnet-4",
+ Self::ClaudeHaiku4_5 => "claude-haiku-4-5",
+ Self::Claude3_5Haiku => "claude-3-5-haiku",
+
+ Self::Gpt5_4 => "gpt-5.4",
+ Self::Gpt5_4Pro => "gpt-5.4-pro",
+ Self::Gpt5_4Mini => "gpt-5.4-mini",
+ Self::Gpt5_4Nano => "gpt-5.4-nano",
+ Self::Gpt5_3Codex => "gpt-5.3-codex",
+ Self::Gpt5_3Spark => "gpt-5.3-codex-spark",
+ Self::Gpt5_2 => "gpt-5.2",
+ Self::Gpt5_2Codex => "gpt-5.2-codex",
+ Self::Gpt5_1 => "gpt-5.1",
+ Self::Gpt5_1Codex => "gpt-5.1-codex",
+ Self::Gpt5_1CodexMax => "gpt-5.1-codex-max",
+ Self::Gpt5_1CodexMini => "gpt-5.1-codex-mini",
+ Self::Gpt5 => "gpt-5",
+ Self::Gpt5Codex => "gpt-5-codex",
+ Self::Gpt5Nano => "gpt-5-nano",
+
+ Self::Gemini3_1Pro => "gemini-3.1-pro",
+ Self::Gemini3Flash => "gemini-3-flash",
+
+ Self::MiniMaxM2_5 => "minimax-m2.5",
+ Self::MiniMaxM2_5Free => "minimax-m2.5-free",
+ Self::Glm5 => "glm-5",
+ Self::KimiK2_5 => "kimi-k2.5",
+ Self::MimoV2ProFree => "mimo-v2-pro-free",
+ Self::MimoV2OmniFree => "mimo-v2-omni-free",
+ Self::MimoV2FlashFree => "mimo-v2-flash-free",
+ Self::TrinityLargePreviewFree => "trinity-large-preview-free",
+ Self::BigPickle => "big-pickle",
+ Self::Nemotron3SuperFree => "nemotron-3-super-free",
+
+ Self::Custom { name, .. } => name,
+ }
+ }
+
+ pub fn display_name(&self) -> &str {
+ match self {
+ Self::ClaudeOpus4_6 => "Claude Opus 4.6",
+ Self::ClaudeOpus4_5 => "Claude Opus 4.5",
+ Self::ClaudeOpus4_1 => "Claude Opus 4.1",
+ Self::ClaudeSonnet4_6 => "Claude Sonnet 4.6",
+ Self::ClaudeSonnet4_5 => "Claude Sonnet 4.5",
+ Self::ClaudeSonnet4 => "Claude Sonnet 4",
+ Self::ClaudeHaiku4_5 => "Claude Haiku 4.5",
+ Self::Claude3_5Haiku => "Claude Haiku 3.5",
+
+ Self::Gpt5_4 => "GPT 5.4",
+ Self::Gpt5_4Pro => "GPT 5.4 Pro",
+ Self::Gpt5_4Mini => "GPT 5.4 Mini",
+ Self::Gpt5_4Nano => "GPT 5.4 Nano",
+ Self::Gpt5_3Codex => "GPT 5.3 Codex",
+ Self::Gpt5_3Spark => "GPT 5.3 Codex Spark",
+ Self::Gpt5_2 => "GPT 5.2",
+ Self::Gpt5_2Codex => "GPT 5.2 Codex",
+ Self::Gpt5_1 => "GPT 5.1",
+ Self::Gpt5_1Codex => "GPT 5.1 Codex",
+ Self::Gpt5_1CodexMax => "GPT 5.1 Codex Max",
+ Self::Gpt5_1CodexMini => "GPT 5.1 Codex Mini",
+ Self::Gpt5 => "GPT 5",
+ Self::Gpt5Codex => "GPT 5 Codex",
+ Self::Gpt5Nano => "GPT 5 Nano",
+
+ Self::Gemini3_1Pro => "Gemini 3.1 Pro",
+ Self::Gemini3Flash => "Gemini 3 Flash",
+
+ Self::MiniMaxM2_5 => "MiniMax M2.5",
+ Self::MiniMaxM2_5Free => "MiniMax M2.5 Free",
+ Self::Glm5 => "GLM 5",
+ Self::KimiK2_5 => "Kimi K2.5",
+ Self::MimoV2ProFree => "MiMo V2 Pro Free",
+ Self::MimoV2OmniFree => "MiMo V2 Omni Free",
+ Self::MimoV2FlashFree => "MiMo V2 Flash Free",
+ Self::TrinityLargePreviewFree => "Trinity Large Preview Free",
+ Self::BigPickle => "Big Pickle",
+ Self::Nemotron3SuperFree => "Nemotron 3 Super Free",
+
+ Self::Custom {
+ name, display_name, ..
+ } => display_name.as_deref().unwrap_or(name),
+ }
+ }
+
+ pub fn protocol(&self) -> ApiProtocol {
+ match self {
+ Self::ClaudeOpus4_6
+ | Self::ClaudeOpus4_5
+ | Self::ClaudeOpus4_1
+ | Self::ClaudeSonnet4_6
+ | Self::ClaudeSonnet4_5
+ | Self::ClaudeSonnet4
+ | Self::ClaudeHaiku4_5
+ | Self::Claude3_5Haiku => ApiProtocol::Anthropic,
+
+ Self::Gpt5_4
+ | Self::Gpt5_4Pro
+ | Self::Gpt5_4Mini
+ | Self::Gpt5_4Nano
+ | Self::Gpt5_3Codex
+ | Self::Gpt5_3Spark
+ | Self::Gpt5_2
+ | Self::Gpt5_2Codex
+ | Self::Gpt5_1
+ | Self::Gpt5_1Codex
+ | Self::Gpt5_1CodexMax
+ | Self::Gpt5_1CodexMini
+ | Self::Gpt5
+ | Self::Gpt5Codex
+ | Self::Gpt5Nano => ApiProtocol::OpenAiResponses,
+
+ Self::Gemini3_1Pro | Self::Gemini3Flash => ApiProtocol::Google,
+
+ Self::MiniMaxM2_5
+ | Self::MiniMaxM2_5Free
+ | Self::Glm5
+ | Self::KimiK2_5
+ | Self::MimoV2ProFree
+ | Self::MimoV2OmniFree
+ | Self::MimoV2FlashFree
+ | Self::TrinityLargePreviewFree
+ | Self::BigPickle
+ | Self::Nemotron3SuperFree => ApiProtocol::OpenAiChat,
+
+ Self::Custom { protocol, .. } => *protocol,
+ }
+ }
+
+ /// Maximum context-window size (in tokens) accepted by this model.
+ ///
+ /// NOTE(review): the per-model constants are assumed to mirror each
+ /// provider's published limits — verify against the OpenCode Zen catalog.
+ /// `Custom` models report whatever `max_tokens` was configured for them.
+ pub fn max_token_count(&self) -> u64 {
+ match self {
+ // Anthropic models
+ Self::ClaudeOpus4_6 | Self::ClaudeSonnet4_6 => 1_000_000,
+ Self::ClaudeOpus4_5 | Self::ClaudeSonnet4_5 | Self::ClaudeSonnet4 => 200_000,
+ Self::ClaudeOpus4_1 => 200_000,
+ Self::ClaudeHaiku4_5 => 200_000,
+ Self::Claude3_5Haiku => 200_000,
+
+ // OpenAI models
+ Self::Gpt5_4 | Self::Gpt5_4Pro => 1_050_000,
+ Self::Gpt5_4Mini | Self::Gpt5_4Nano => 400_000,
+ Self::Gpt5_3Codex => 400_000,
+ Self::Gpt5_3Spark => 128_000,
+ Self::Gpt5_2 | Self::Gpt5_2Codex => 400_000,
+ Self::Gpt5_1 | Self::Gpt5_1Codex | Self::Gpt5_1CodexMax | Self::Gpt5_1CodexMini => {
+ 400_000
+ }
+ Self::Gpt5 | Self::Gpt5Codex | Self::Gpt5Nano => 400_000,
+
+ // Google models
+ Self::Gemini3_1Pro => 1_048_576,
+ Self::Gemini3Flash => 1_048_576,
+
+ // OpenAI-compatible models
+ Self::MiniMaxM2_5 | Self::MiniMaxM2_5Free => 196_608,
+ Self::Glm5 => 200_000,
+ Self::KimiK2_5 => 262_144,
+ Self::MimoV2ProFree => 1_048_576,
+ Self::MimoV2OmniFree | Self::MimoV2FlashFree => 262_144,
+ Self::TrinityLargePreviewFree => 131_072,
+ Self::BigPickle => 200_000,
+ Self::Nemotron3SuperFree => 262_144,
+
+ // User-configured models supply their own context limit.
+ Self::Custom { max_tokens, .. } => *max_tokens,
+ }
+ }
+
+ /// Maximum number of tokens the model may emit in a single response,
+ /// or `None` for `Custom` models configured without an output limit.
+ ///
+ /// NOTE(review): the extracted source read `-> Option` with the generic
+ /// parameter stripped; restored to `Option<u64>` to match the `u64` of
+ /// `max_token_count` and the `Some(128_000)`-style literals below —
+ /// confirm against the original file.
+ pub fn max_output_tokens(&self) -> Option<u64> {
+ match self {
+ // Anthropic models
+ Self::ClaudeOpus4_6 => Some(128_000),
+ Self::ClaudeSonnet4_6 => Some(64_000),
+ Self::ClaudeOpus4_5
+ | Self::ClaudeOpus4_1
+ | Self::ClaudeSonnet4_5
+ | Self::ClaudeSonnet4
+ | Self::ClaudeHaiku4_5 => Some(64_000),
+ Self::Claude3_5Haiku => Some(8_192),
+
+ // OpenAI models
+ Self::Gpt5_4
+ | Self::Gpt5_4Pro
+ | Self::Gpt5_4Mini
+ | Self::Gpt5_4Nano
+ | Self::Gpt5_3Codex
+ | Self::Gpt5_3Spark
+ | Self::Gpt5_2
+ | Self::Gpt5_2Codex
+ | Self::Gpt5_1
+ | Self::Gpt5_1Codex
+ | Self::Gpt5_1CodexMax
+ | Self::Gpt5_1CodexMini
+ | Self::Gpt5
+ | Self::Gpt5Codex
+ | Self::Gpt5Nano => Some(128_000),
+
+ // Google models
+ Self::Gemini3_1Pro | Self::Gemini3Flash => Some(65_536),
+
+ // OpenAI-compatible models
+ Self::MiniMaxM2_5 | Self::MiniMaxM2_5Free => Some(65_536),
+ Self::Glm5 | Self::BigPickle => Some(128_000),
+ Self::KimiK2_5 => Some(65_536),
+ Self::MimoV2ProFree => Some(131_072),
+ Self::MimoV2OmniFree | Self::MimoV2FlashFree => Some(65_536),
+ Self::TrinityLargePreviewFree | Self::Nemotron3SuperFree => Some(16_384),
+
+ // User-configured models may leave the output limit unset.
+ Self::Custom {
+ max_output_tokens, ..
+ } => *max_output_tokens,
+ }
+ }
+
+ /// Whether the model supports tool (function) calling.
+ ///
+ /// Every model in the catalog — including `Custom` entries — is treated
+ /// as tool-capable.
+ pub fn supports_tools(&self) -> bool {
+ true
+ }
+
+ /// Whether the model accepts image (vision) inputs.
+ ///
+ /// Known Anthropic, OpenAI, and Google models report `true`; the
+ /// OpenAI-compatible third-party models report `false` as a conservative
+ /// default.
+ pub fn supports_images(&self) -> bool {
+ match self {
+ // Anthropic models support images
+ Self::ClaudeOpus4_6
+ | Self::ClaudeOpus4_5
+ | Self::ClaudeOpus4_1
+ | Self::ClaudeSonnet4_6
+ | Self::ClaudeSonnet4_5
+ | Self::ClaudeSonnet4
+ | Self::ClaudeHaiku4_5
+ | Self::Claude3_5Haiku => true,
+
+ // OpenAI models support images
+ Self::Gpt5_4
+ | Self::Gpt5_4Pro
+ | Self::Gpt5_4Mini
+ | Self::Gpt5_4Nano
+ | Self::Gpt5_3Codex
+ | Self::Gpt5_3Spark
+ | Self::Gpt5_2
+ | Self::Gpt5_2Codex
+ | Self::Gpt5_1
+ | Self::Gpt5_1Codex
+ | Self::Gpt5_1CodexMax
+ | Self::Gpt5_1CodexMini
+ | Self::Gpt5
+ | Self::Gpt5Codex
+ | Self::Gpt5Nano => true,
+
+ // Google models support images
+ Self::Gemini3_1Pro | Self::Gemini3Flash => true,
+
+ // OpenAI-compatible models — conservative default
+ Self::MiniMaxM2_5
+ | Self::MiniMaxM2_5Free
+ | Self::Glm5
+ | Self::KimiK2_5
+ | Self::MimoV2ProFree
+ | Self::MimoV2OmniFree
+ | Self::MimoV2FlashFree
+ | Self::TrinityLargePreviewFree
+ | Self::BigPickle
+ | Self::Nemotron3SuperFree => false,
+
+ // NOTE(review): this `matches!` lists every `ApiProtocol` variant
+ // visible in this file, so `Custom` appears to always report `true`;
+ // confirm whether that is intentional or a variant is missing.
+ Self::Custom { protocol, .. } => matches!(
+ protocol,
+ ApiProtocol::Anthropic
+ | ApiProtocol::OpenAiResponses
+ | ApiProtocol::OpenAiChat
+ | ApiProtocol::Google
+ ),
+ }
+ }
+}
+
+/// Stream generate content for Google models via OpenCode Zen.
+///
+/// Unlike `google_ai::stream_generate_content()`, this uses:
+/// - `/v1/models/{model}` path (not `/v1beta/models/{model}`)
+/// - `Authorization: Bearer` header (not `key=` query param)
+///
+/// Returns a stream of parsed SSE `data:` payloads; a non-2xx response is
+/// surfaced as a single error containing the status code and response body.
+///
+/// NOTE(review): the extracted source showed the return type as `Result>>`
+/// with generics stripped; reconstructed here to the same shape as
+/// `google_ai::stream_generate_content` — confirm against the original file.
+pub async fn stream_generate_content_zen(
+ client: &dyn HttpClient,
+ api_url: &str,
+ api_key: &str,
+ request: google_ai::GenerateContentRequest,
+) -> Result<BoxStream<'static, Result<google_ai::GenerateContentResponse>>> {
+ // Keys pasted from dashboards often carry stray whitespace.
+ let api_key = api_key.trim();
+
+ let model_id = &request.model.model_id;
+
+ // Zen serves Gemini under `/v1` (not `/v1beta`); `alt=sse` selects
+ // server-sent-event framing for the streamed response.
+ let uri = format!("{api_url}/v1/models/{model_id}:streamGenerateContent?alt=sse");
+
+ let request_builder = HttpRequest::builder()
+ .method(Method::POST)
+ .uri(uri)
+ .header("Content-Type", "application/json")
+ .header("Authorization", format!("Bearer {api_key}"));
+
+ let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?;
+ let mut response = client.send(request).await?;
+ if response.status().is_success() {
+ let reader = BufReader::new(response.into_body());
+ Ok(reader
+ .lines()
+ .filter_map(|line| async move {
+ match line {
+ Ok(line) => {
+ // Only SSE `data: ` lines carry payloads; blank
+ // keep-alives and other SSE fields are dropped.
+ if let Some(line) = line.strip_prefix("data: ") {
+ match serde_json::from_str(line) {
+ Ok(response) => Some(Ok(response)),
+ Err(error) => {
+ Some(Err(anyhow!("Error parsing JSON: {error:?}\n{line:?}")))
+ }
+ }
+ } else {
+ None
+ }
+ }
+ Err(error) => Some(Err(anyhow!(error))),
+ }
+ })
+ .boxed())
+ } else {
+ // Read the body before reporting so the error carries the server's
+ // explanation alongside the status code.
+ let mut text = String::new();
+ response.body_mut().read_to_string(&mut text).await?;
+ Err(anyhow!(
+ "error during streamGenerateContent via OpenCode Zen, status code: {:?}, body: {}",
+ response.status(),
+ text
+ ))
+ }
+}
diff --git a/crates/settings_content/src/language_model.rs b/crates/settings_content/src/language_model.rs
index 8ced6e0b487a673ff4dba34cae9c1e2c7ee45d13..fef92dc8f43d52c160c1e8c8a2fb7aeb0533e2c0 100644
--- a/crates/settings_content/src/language_model.rs
+++ b/crates/settings_content/src/language_model.rs
@@ -16,6 +16,7 @@ pub struct AllLanguageModelSettingsContent {
pub lmstudio: Option,
pub mistral: Option,
pub ollama: Option,
+ pub opencode: Option,
pub open_router: Option,
pub openai: Option,
pub openai_compatible: Option, OpenAiCompatibleSettingsContent>>,
@@ -144,6 +145,24 @@ impl Default for KeepAlive {
}
}
+#[with_fallible_options]
+#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
+pub struct OpenCodeSettingsContent {
+ /// Base URL of the OpenCode Zen API (default: "https://opencode.ai/zen").
+ // NOTE(review): extraction stripped the generic here (`Option,`);
+ // restored to `Option<String>` — the default settings show a URL string.
+ pub api_url: Option<String>,
+ /// Additional models to expose beyond the built-in catalog.
+ // NOTE(review): extraction stripped generics here (`Option>`); restored
+ // to `Option<Vec<OpenCodeAvailableModel>>` — confirm against original.
+ pub available_models: Option<Vec<OpenCodeAvailableModel>>,
+}
+
+#[with_fallible_options]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)]
+pub struct OpenCodeAvailableModel {
+ /// Model identifier sent to the API.
+ pub name: String,
+ /// Human-readable name shown in the UI; falls back to `name` when unset.
+ // NOTE(review): extraction stripped the generic (`Option,`); restored to
+ // `Option<String>` to match the enum's `display_name.as_deref()` usage.
+ pub display_name: Option<String>,
+ /// Maximum context-window size in tokens.
+ pub max_tokens: u64,
+ /// Maximum output tokens, if the model publishes a limit.
+ // NOTE(review): extraction stripped the generic (`Option,`); restored to
+ // `Option<u64>` to match `max_tokens: u64` above.
+ pub max_output_tokens: Option<u64>,
+ /// The API protocol to use for this model: "anthropic", "openai_responses", "openai_chat", or "google".
+ pub protocol: String,
+}
+
#[with_fallible_options]
#[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)]
pub struct LmStudioSettingsContent {