Detailed changes
@@ -5916,6 +5916,7 @@ dependencies = [
"http_client",
"language",
"language_extension",
+ "language_model",
"log",
"lsp",
"moka",
@@ -5938,6 +5939,7 @@ dependencies = [
"theme",
"theme_extension",
"toml 0.8.23",
+ "ui",
"url",
"util",
"wasmparser 0.221.3",
@@ -8911,6 +8913,7 @@ dependencies = [
"credentials_provider",
"deepseek",
"editor",
+ "extension",
"fs",
"futures 0.3.31",
"google_ai",
@@ -29,6 +29,7 @@ pub struct ExtensionHostProxy {
slash_command_proxy: RwLock<Option<Arc<dyn ExtensionSlashCommandProxy>>>,
context_server_proxy: RwLock<Option<Arc<dyn ExtensionContextServerProxy>>>,
debug_adapter_provider_proxy: RwLock<Option<Arc<dyn ExtensionDebugAdapterProviderProxy>>>,
+ language_model_provider_proxy: RwLock<Option<Arc<dyn ExtensionLanguageModelProviderProxy>>>,
}
impl ExtensionHostProxy {
@@ -54,6 +55,7 @@ impl ExtensionHostProxy {
slash_command_proxy: RwLock::default(),
context_server_proxy: RwLock::default(),
debug_adapter_provider_proxy: RwLock::default(),
+ language_model_provider_proxy: RwLock::default(),
}
}
@@ -90,6 +92,15 @@ impl ExtensionHostProxy {
.write()
.replace(Arc::new(proxy));
}
+
+ pub fn register_language_model_provider_proxy(
+ &self,
+ proxy: impl ExtensionLanguageModelProviderProxy,
+ ) {
+ self.language_model_provider_proxy
+ .write()
+ .replace(Arc::new(proxy));
+ }
}
pub trait ExtensionThemeProxy: Send + Sync + 'static {
@@ -375,6 +386,49 @@ pub trait ExtensionContextServerProxy: Send + Sync + 'static {
fn unregister_context_server(&self, server_id: Arc<str>, cx: &mut App);
}
+/// A boxed function that registers a language model provider with the registry.
+/// This allows extension_host to create the provider (which requires WasmExtension)
+/// and pass a registration closure to the language_models crate.
+pub type LanguageModelProviderRegistration = Box<dyn FnOnce(&mut App) + Send + Sync + 'static>;
+
+pub trait ExtensionLanguageModelProviderProxy: Send + Sync + 'static {
+ /// Register an LLM provider from an extension.
+ /// The `register_fn` closure will be called with the App context and should
+ /// register the provider with the LanguageModelRegistry.
+ fn register_language_model_provider(
+ &self,
+ provider_id: Arc<str>,
+ register_fn: LanguageModelProviderRegistration,
+ cx: &mut App,
+ );
+
+ /// Unregister an LLM provider when an extension is unloaded.
+ fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App);
+}
+
+impl ExtensionLanguageModelProviderProxy for ExtensionHostProxy {
+ fn register_language_model_provider(
+ &self,
+ provider_id: Arc<str>,
+ register_fn: LanguageModelProviderRegistration,
+ cx: &mut App,
+ ) {
+ let Some(proxy) = self.language_model_provider_proxy.read().clone() else {
+ return;
+ };
+
+ proxy.register_language_model_provider(provider_id, register_fn, cx)
+ }
+
+ fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App) {
+ let Some(proxy) = self.language_model_provider_proxy.read().clone() else {
+ return;
+ };
+
+ proxy.unregister_language_model_provider(provider_id, cx)
+ }
+}
+
impl ExtensionContextServerProxy for ExtensionHostProxy {
fn register_context_server(
&self,
@@ -93,6 +93,8 @@ pub struct ExtensionManifest {
pub debug_adapters: BTreeMap<Arc<str>, DebugAdapterManifestEntry>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub debug_locators: BTreeMap<Arc<str>, DebugLocatorManifestEntry>,
+ #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
+ pub language_model_providers: BTreeMap<Arc<str>, LanguageModelProviderManifestEntry>,
}
impl ExtensionManifest {
@@ -288,6 +290,57 @@ pub struct DebugAdapterManifestEntry {
#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
pub struct DebugLocatorManifestEntry {}
+/// Manifest entry for a language model provider.
+#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
+pub struct LanguageModelProviderManifestEntry {
+ /// Display name for the provider.
+ pub name: String,
+ /// Icon name from Zed's icon set (optional).
+ #[serde(default)]
+ pub icon: Option<String>,
+ /// Default models to show even before API connection.
+ #[serde(default)]
+ pub models: Vec<LanguageModelManifestEntry>,
+ /// Authentication configuration.
+ #[serde(default)]
+ pub auth: Option<LanguageModelAuthConfig>,
+}
+
+/// Manifest entry for a language model.
+#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
+pub struct LanguageModelManifestEntry {
+ /// Unique identifier for the model.
+ pub id: String,
+ /// Display name for the model.
+ pub name: String,
+ /// Maximum input token count.
+ #[serde(default)]
+ pub max_token_count: u64,
+ /// Maximum output tokens (optional).
+ #[serde(default)]
+ pub max_output_tokens: Option<u64>,
+ /// Whether the model supports image inputs.
+ #[serde(default)]
+ pub supports_images: bool,
+ /// Whether the model supports tool/function calling.
+ #[serde(default)]
+ pub supports_tools: bool,
+ /// Whether the model supports extended thinking/reasoning.
+ #[serde(default)]
+ pub supports_thinking: bool,
+}
+
+/// Authentication configuration for a language model provider.
+#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)]
+pub struct LanguageModelAuthConfig {
+ /// Environment variable name for the API key.
+ #[serde(default)]
+ pub env_var: Option<String>,
+ /// Label to show when prompting for credentials.
+ #[serde(default)]
+ pub credential_label: Option<String>,
+}
+
impl ExtensionManifest {
pub async fn load(fs: Arc<dyn Fs>, extension_dir: &Path) -> Result<Self> {
let extension_name = extension_dir
@@ -358,6 +411,7 @@ fn manifest_from_old_manifest(
capabilities: Vec::new(),
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: Default::default(),
}
}
@@ -391,6 +445,7 @@ mod tests {
capabilities: vec![],
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: BTreeMap::default(),
}
}
@@ -17,7 +17,8 @@ pub use serde_json;
pub use wit::{
CodeLabel, CodeLabelSpan, CodeLabelSpanLiteral, Command, DownloadedFileType, EnvVars,
KeyValueStore, LanguageServerInstallationStatus, Project, Range, Worktree, download_file,
- make_file_executable,
+ llm_delete_credential, llm_get_credential, llm_get_env_var, llm_request_credential,
+ llm_store_credential, make_file_executable,
zed::extension::context_server::ContextServerConfiguration,
zed::extension::dap::{
AttachRequest, BuildTaskDefinition, BuildTaskDefinitionTemplatePayload, BuildTaskTemplate,
@@ -29,6 +30,19 @@ pub use wit::{
GithubRelease, GithubReleaseAsset, GithubReleaseOptions, github_release_by_tag_name,
latest_github_release,
},
+ zed::extension::llm_provider::{
+ CacheConfiguration as LlmCacheConfiguration, CompletionEvent as LlmCompletionEvent,
+ CompletionRequest as LlmCompletionRequest, CredentialType as LlmCredentialType,
+ ImageData as LlmImageData, MessageContent as LlmMessageContent,
+ MessageRole as LlmMessageRole, ModelCapabilities as LlmModelCapabilities,
+ ModelInfo as LlmModelInfo, ProviderInfo as LlmProviderInfo,
+ RequestMessage as LlmRequestMessage, StopReason as LlmStopReason,
+ ThinkingContent as LlmThinkingContent, TokenUsage as LlmTokenUsage,
+ ToolChoice as LlmToolChoice, ToolDefinition as LlmToolDefinition,
+ ToolInputFormat as LlmToolInputFormat, ToolResult as LlmToolResult,
+ ToolResultContent as LlmToolResultContent, ToolUse as LlmToolUse,
+ ToolUseJsonParseError as LlmToolUseJsonParseError,
+ },
zed::extension::nodejs::{
node_binary_path, npm_install_package, npm_package_installed_version,
npm_package_latest_version,
@@ -259,6 +273,79 @@ pub trait Extension: Send + Sync {
) -> Result<DebugRequest, String> {
Err("`run_dap_locator` not implemented".to_string())
}
+
+ // =========================================================================
+ // Language Model Provider Methods
+ // =========================================================================
+
+ /// Returns information about language model providers offered by this extension.
+ fn llm_providers(&self) -> Vec<LlmProviderInfo> {
+ Vec::new()
+ }
+
+ /// Returns the models available for a provider.
+ fn llm_provider_models(&self, _provider_id: &str) -> Result<Vec<LlmModelInfo>, String> {
+ Ok(Vec::new())
+ }
+
+ /// Check if the provider is authenticated.
+ fn llm_provider_is_authenticated(&self, _provider_id: &str) -> bool {
+ false
+ }
+
+ /// Attempt to authenticate the provider.
+ fn llm_provider_authenticate(&mut self, _provider_id: &str) -> Result<(), String> {
+ Err("`llm_provider_authenticate` not implemented".to_string())
+ }
+
+ /// Reset credentials for the provider.
+ fn llm_provider_reset_credentials(&mut self, _provider_id: &str) -> Result<(), String> {
+ Err("`llm_provider_reset_credentials` not implemented".to_string())
+ }
+
+ /// Count tokens for a request.
+ fn llm_count_tokens(
+ &self,
+ _provider_id: &str,
+ _model_id: &str,
+ _request: &LlmCompletionRequest,
+ ) -> Result<u64, String> {
+ Err("`llm_count_tokens` not implemented".to_string())
+ }
+
+ /// Start streaming a completion from the model.
+ /// Returns a stream ID that can be used with `llm_stream_completion_next` and `llm_stream_completion_close`.
+ fn llm_stream_completion_start(
+ &mut self,
+ _provider_id: &str,
+ _model_id: &str,
+ _request: &LlmCompletionRequest,
+ ) -> Result<String, String> {
+ Err("`llm_stream_completion_start` not implemented".to_string())
+ }
+
+ /// Get the next event from a completion stream.
+ /// Returns `Ok(None)` when the stream is complete.
+ fn llm_stream_completion_next(
+ &mut self,
+ _stream_id: &str,
+ ) -> Result<Option<LlmCompletionEvent>, String> {
+ Err("`llm_stream_completion_next` not implemented".to_string())
+ }
+
+ /// Close a completion stream and release its resources.
+ fn llm_stream_completion_close(&mut self, _stream_id: &str) {
+ // Default implementation does nothing
+ }
+
+ /// Get cache configuration for a model (if prompt caching is supported).
+ fn llm_cache_configuration(
+ &self,
+ _provider_id: &str,
+ _model_id: &str,
+ ) -> Option<LlmCacheConfiguration> {
+ None
+ }
}
/// Registers the provided type as a Zed extension.
@@ -334,7 +421,7 @@ mod wit {
wit_bindgen::generate!({
skip: ["init-extension"],
- path: "./wit/since_v0.6.0",
+ path: "./wit/since_v0.7.0",
});
}
@@ -518,6 +605,61 @@ impl wit::Guest for Component {
) -> Result<DebugRequest, String> {
extension().run_dap_locator(locator_name, build_task)
}
+
+ // =========================================================================
+ // Language Model Provider Methods
+ // =========================================================================
+
+ fn llm_providers() -> Vec<LlmProviderInfo> {
+ extension().llm_providers()
+ }
+
+ fn llm_provider_models(provider_id: String) -> Result<Vec<LlmModelInfo>, String> {
+ extension().llm_provider_models(&provider_id)
+ }
+
+ fn llm_provider_is_authenticated(provider_id: String) -> bool {
+ extension().llm_provider_is_authenticated(&provider_id)
+ }
+
+ fn llm_provider_authenticate(provider_id: String) -> Result<(), String> {
+ extension().llm_provider_authenticate(&provider_id)
+ }
+
+ fn llm_provider_reset_credentials(provider_id: String) -> Result<(), String> {
+ extension().llm_provider_reset_credentials(&provider_id)
+ }
+
+ fn llm_count_tokens(
+ provider_id: String,
+ model_id: String,
+ request: LlmCompletionRequest,
+ ) -> Result<u64, String> {
+ extension().llm_count_tokens(&provider_id, &model_id, &request)
+ }
+
+ fn llm_stream_completion_start(
+ provider_id: String,
+ model_id: String,
+ request: LlmCompletionRequest,
+ ) -> Result<String, String> {
+ extension().llm_stream_completion_start(&provider_id, &model_id, &request)
+ }
+
+ fn llm_stream_completion_next(stream_id: String) -> Result<Option<LlmCompletionEvent>, String> {
+ extension().llm_stream_completion_next(&stream_id)
+ }
+
+ fn llm_stream_completion_close(stream_id: String) {
+ extension().llm_stream_completion_close(&stream_id)
+ }
+
+ fn llm_cache_configuration(
+ provider_id: String,
+ model_id: String,
+ ) -> Option<LlmCacheConfiguration> {
+ extension().llm_cache_configuration(&provider_id, &model_id)
+ }
}
/// The ID of a language server.
@@ -0,0 +1,12 @@
+interface common {
+ /// A (half-open) range (`[start, end)`).
+ record range {
+ /// The start of the range (inclusive).
+ start: u32,
+ /// The end of the range (exclusive).
+ end: u32,
+ }
+
+ /// A list of environment variables.
+ type env-vars = list<tuple<string, string>>;
+}
@@ -0,0 +1,11 @@
+interface context-server {
+ /// Configuration for context server setup and installation.
+ record context-server-configuration {
+ /// Installation instructions in Markdown format.
+ installation-instructions: string,
+ /// JSON schema for settings validation.
+ settings-schema: string,
+ /// Default settings template.
+ default-settings: string,
+ }
+}
@@ -0,0 +1,123 @@
+interface dap {
+ use common.{env-vars};
+
+ /// Resolves a specified TcpArgumentsTemplate into TcpArguments
+ resolve-tcp-template: func(template: tcp-arguments-template) -> result<tcp-arguments, string>;
+
+ record launch-request {
+ program: string,
+ cwd: option<string>,
+ args: list<string>,
+ envs: env-vars,
+ }
+
+ record attach-request {
+ process-id: option<u32>,
+ }
+
+ variant debug-request {
+ launch(launch-request),
+ attach(attach-request)
+ }
+
+ record tcp-arguments {
+ port: u16,
+ host: u32,
+ timeout: option<u64>,
+ }
+
+ record tcp-arguments-template {
+ port: option<u16>,
+ host: option<u32>,
+ timeout: option<u64>,
+ }
+
+ /// Debug Config is the "highest-level" configuration for a debug session.
+ /// It comes from a new process modal UI; thus, it is essentially debug-adapter-agnostic.
+ /// The extension is expected to translate this generic configuration into something the adapter can debug (a debug scenario).
+ record debug-config {
+ /// Name of the debug task
+ label: string,
+ /// The debug adapter to use
+ adapter: string,
+ request: debug-request,
+ stop-on-entry: option<bool>,
+ }
+
+ record task-template {
+ /// Human readable name of the task to display in the UI.
+ label: string,
+ /// Executable command to spawn.
+ command: string,
+ args: list<string>,
+ env: env-vars,
+ cwd: option<string>,
+ }
+
+ /// A task template with substituted task variables.
+ type resolved-task = task-template;
+
+ /// A task template for building a debug target.
+ type build-task-template = task-template;
+
+ variant build-task-definition {
+ by-name(string),
+ template(build-task-definition-template-payload )
+ }
+ record build-task-definition-template-payload {
+ locator-name: option<string>,
+ template: build-task-template
+ }
+
+ /// Debug Scenario is the user-facing configuration type (used in debug.json). It is still concerned with what to debug and not necessarily how to do it (except for any
+ /// debug-adapter-specific configuration options).
+ record debug-scenario {
+ /// Unsubstituted label for the task.
+ label: string,
+ /// Name of the Debug Adapter this configuration is intended for.
+ adapter: string,
+ /// An optional build step to be run prior to starting a debug session. Build steps are used by Zed's locators to locate the executable to debug.
+ build: option<build-task-definition>,
+ /// JSON-encoded configuration for a given debug adapter.
+ config: string,
+ /// TCP connection parameters (if they were specified by user)
+ tcp-connection: option<tcp-arguments-template>,
+ }
+
+ enum start-debugging-request-arguments-request {
+ launch,
+ attach,
+ }
+
+ record debug-task-definition {
+ /// Unsubstituted label for the task.
+ label: string,
+ /// Name of the Debug Adapter this configuration is intended for.
+ adapter: string,
+ /// JSON-encoded configuration for a given debug adapter.
+ config: string,
+ /// TCP connection parameters (if they were specified by user)
+ tcp-connection: option<tcp-arguments-template>,
+ }
+
+ record start-debugging-request-arguments {
+ /// JSON-encoded configuration for a given debug adapter. It is specific to each debug adapter.
+ /// `configuration` will have its Zed variable references substituted prior to being passed to the debug adapter.
+ configuration: string,
+ request: start-debugging-request-arguments-request,
+ }
+
+ /// The lowest-level representation of a debug session, which specifies:
+ /// - How to start a debug adapter process
+ /// - How to start a debug session with it (using DAP protocol)
+ /// for a given debug scenario.
+ record debug-adapter-binary {
+ command: option<string>,
+ arguments: list<string>,
+ envs: env-vars,
+ cwd: option<string>,
+ /// Zed will use TCP transport if `connection` is specified.
+ connection: option<tcp-arguments>,
+ request-args: start-debugging-request-arguments
+ }
+}
@@ -0,0 +1,248 @@
+package zed:extension;
+
+world extension {
+ import context-server;
+ import dap;
+ import github;
+ import http-client;
+ import platform;
+ import process;
+ import nodejs;
+ import llm-provider;
+
+ use common.{env-vars, range};
+ use context-server.{context-server-configuration};
+ use dap.{attach-request, build-task-template, debug-config, debug-adapter-binary, debug-task-definition, debug-request, debug-scenario, launch-request, resolved-task, start-debugging-request-arguments-request};
+ use lsp.{completion, symbol};
+ use process.{command};
+ use slash-command.{slash-command, slash-command-argument-completion, slash-command-output};
+ use llm-provider.{
+ provider-info, model-info, completion-request,
+ credential-type, cache-configuration, completion-event, token-usage
+ };
+
+ /// Initializes the extension.
+ export init-extension: func();
+
+ /// The type of a downloaded file.
+ enum downloaded-file-type {
+ /// A gzipped file (`.gz`).
+ gzip,
+ /// A gzipped tar archive (`.tar.gz`).
+ gzip-tar,
+ /// A ZIP file (`.zip`).
+ zip,
+ /// An uncompressed file.
+ uncompressed,
+ }
+
+ /// The installation status for a language server.
+ variant language-server-installation-status {
+ /// The language server has no installation status.
+ none,
+ /// The language server is being downloaded.
+ downloading,
+ /// The language server is checking for updates.
+ checking-for-update,
+ /// The language server installation failed for specified reason.
+ failed(string),
+ }
+
+ record settings-location {
+ worktree-id: u64,
+ path: string,
+ }
+
+ import get-settings: func(path: option<settings-location>, category: string, key: option<string>) -> result<string, string>;
+
+ /// Downloads a file from the given URL and saves it to the given path within the extension's
+ /// working directory.
+ ///
+ /// The file will be extracted according to the given file type.
+ import download-file: func(url: string, file-path: string, file-type: downloaded-file-type) -> result<_, string>;
+
+ /// Makes the file at the given path executable.
+ import make-file-executable: func(filepath: string) -> result<_, string>;
+
+ /// Updates the installation status for the given language server.
+ import set-language-server-installation-status: func(language-server-name: string, status: language-server-installation-status);
+
+ /// A Zed worktree.
+ resource worktree {
+ /// Returns the ID of the worktree.
+ id: func() -> u64;
+ /// Returns the root path of the worktree.
+ root-path: func() -> string;
+ /// Returns the textual contents of the specified file in the worktree.
+ read-text-file: func(path: string) -> result<string, string>;
+ /// Returns the path to the given binary name, if one is present on the `$PATH`.
+ which: func(binary-name: string) -> option<string>;
+ /// Returns the current shell environment.
+ shell-env: func() -> env-vars;
+ }
+
+ /// A Zed project.
+ resource project {
+ /// Returns the IDs of all of the worktrees in this project.
+ worktree-ids: func() -> list<u64>;
+ }
+
+ /// A key-value store.
+ resource key-value-store {
+ /// Inserts an entry under the specified key.
+ insert: func(key: string, value: string) -> result<_, string>;
+ }
+
+ /// Returns the command used to start up the language server.
+ export language-server-command: func(language-server-id: string, worktree: borrow<worktree>) -> result<command, string>;
+
+ /// Returns the initialization options to pass to the language server on startup.
+ ///
+ /// The initialization options are represented as a JSON string.
+ export language-server-initialization-options: func(language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// Returns the workspace configuration options to pass to the language server.
+ export language-server-workspace-configuration: func(language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// Returns the initialization options to pass to the other language server.
+ export language-server-additional-initialization-options: func(language-server-id: string, target-language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// Returns the workspace configuration options to pass to the other language server.
+ export language-server-additional-workspace-configuration: func(language-server-id: string, target-language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// A label containing some code.
+ record code-label {
+ /// The source code to parse with Tree-sitter.
+ code: string,
+ /// The spans to display in the label.
+ spans: list<code-label-span>,
+ /// The range of the displayed label to include when filtering.
+ filter-range: range,
+ }
+
+ /// A span within a code label.
+ variant code-label-span {
+ /// A range into the parsed code.
+ code-range(range),
+ /// A span containing a code literal.
+ literal(code-label-span-literal),
+ }
+
+ /// A span containing a code literal.
+ record code-label-span-literal {
+ /// The literal text.
+ text: string,
+ /// The name of the highlight to use for this literal.
+ highlight-name: option<string>,
+ }
+
+ export labels-for-completions: func(language-server-id: string, completions: list<completion>) -> result<list<option<code-label>>, string>;
+ export labels-for-symbols: func(language-server-id: string, symbols: list<symbol>) -> result<list<option<code-label>>, string>;
+
+
+ /// Returns the completions that should be shown when completing the provided slash command with the given query.
+ export complete-slash-command-argument: func(command: slash-command, args: list<string>) -> result<list<slash-command-argument-completion>, string>;
+
+ /// Returns the output from running the provided slash command.
+ export run-slash-command: func(command: slash-command, args: list<string>, worktree: option<borrow<worktree>>) -> result<slash-command-output, string>;
+
+ /// Returns the command used to start up a context server.
+ export context-server-command: func(context-server-id: string, project: borrow<project>) -> result<command, string>;
+
+ /// Returns the configuration for a context server.
+ export context-server-configuration: func(context-server-id: string, project: borrow<project>) -> result<option<context-server-configuration>, string>;
+
+ /// Returns a list of packages as suggestions to be included in the `/docs`
+ /// search results.
+ ///
+ /// This can be used to provide completions for known packages (e.g., from the
+ /// local project or a registry) before a package has been indexed.
+ export suggest-docs-packages: func(provider-name: string) -> result<list<string>, string>;
+
+ /// Indexes the docs for the specified package.
+ export index-docs: func(provider-name: string, package-name: string, database: borrow<key-value-store>) -> result<_, string>;
+
+ /// Returns a configured debug adapter binary for a given debug task.
+ export get-dap-binary: func(adapter-name: string, config: debug-task-definition, user-installed-path: option<string>, worktree: borrow<worktree>) -> result<debug-adapter-binary, string>;
+ /// Returns the kind of a debug scenario (launch or attach).
+ export dap-request-kind: func(adapter-name: string, config: string) -> result<start-debugging-request-arguments-request, string>;
+ export dap-config-to-scenario: func(config: debug-config) -> result<debug-scenario, string>;
+ export dap-locator-create-scenario: func(locator-name: string, build-config-template: build-task-template, resolved-label: string, debug-adapter-name: string) -> option<debug-scenario>;
+ export run-dap-locator: func(locator-name: string, config: resolved-task) -> result<debug-request, string>;
+
+ // =========================================================================
+ // Language Model Provider Extension API
+ // =========================================================================
+
+ /// Returns information about language model providers offered by this extension.
+ export llm-providers: func() -> list<provider-info>;
+
+ /// Returns the models available for a provider.
+ export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
+
+ /// Check if the provider is authenticated.
+ export llm-provider-is-authenticated: func(provider-id: string) -> bool;
+
+ /// Attempt to authenticate the provider.
+ export llm-provider-authenticate: func(provider-id: string) -> result<_, string>;
+
+ /// Reset credentials for the provider.
+ export llm-provider-reset-credentials: func(provider-id: string) -> result<_, string>;
+
+ /// Count tokens for a request.
+ export llm-count-tokens: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<u64, string>;
+
+ /// Start streaming a completion from the model.
+ /// Returns a stream ID that can be used with llm-stream-completion-next and llm-stream-completion-close.
+ export llm-stream-completion-start: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<string, string>;
+
+ /// Get the next event from a completion stream.
+ /// Returns None when the stream is complete.
+ export llm-stream-completion-next: func(
+ stream-id: string
+ ) -> result<option<completion-event>, string>;
+
+ /// Close a completion stream and release its resources.
+ export llm-stream-completion-close: func(
+ stream-id: string
+ );
+
+ /// Get cache configuration for a model (if prompt caching is supported).
+ export llm-cache-configuration: func(
+ provider-id: string,
+ model-id: string
+ ) -> option<cache-configuration>;
+
+ // =========================================================================
+ // Language Model Provider Imports (callable by extensions)
+ // =========================================================================
+
+ /// Request a credential from the user.
+ /// Returns true if the credential was provided, false if the user cancelled.
+ import llm-request-credential: func(
+ provider-id: string,
+ credential-type: credential-type,
+ label: string,
+ placeholder: string
+ ) -> result<bool, string>;
+
+ /// Get a stored credential for this provider.
+ import llm-get-credential: func(provider-id: string) -> option<string>;
+
+ /// Store a credential for this provider.
+ import llm-store-credential: func(provider-id: string, value: string) -> result<_, string>;
+
+ /// Delete a stored credential for this provider.
+ import llm-delete-credential: func(provider-id: string) -> result<_, string>;
+
+ /// Read an environment variable.
+ import llm-get-env-var: func(name: string) -> option<string>;
+}
@@ -0,0 +1,35 @@
+interface github {
+ /// A GitHub release.
+ record github-release {
+ /// The version of the release.
+ version: string,
+ /// The list of assets attached to the release.
+ assets: list<github-release-asset>,
+ }
+
+ /// An asset from a GitHub release.
+ record github-release-asset {
+ /// The name of the asset.
+ name: string,
+ /// The download URL for the asset.
+ download-url: string,
+ }
+
+ /// The options used to filter down GitHub releases.
+ record github-release-options {
+ /// Whether releases without assets should be included.
+ require-assets: bool,
+ /// Whether pre-releases should be included.
+ pre-release: bool,
+ }
+
+ /// Returns the latest release for the given GitHub repository.
+ ///
+ /// Takes repo as a string in the form "<owner-name>/<repo-name>", for example: "zed-industries/zed".
+ latest-github-release: func(repo: string, options: github-release-options) -> result<github-release, string>;
+
+ /// Returns the GitHub release with the specified tag name for the given GitHub repository.
+ ///
+ /// Returns an error if a release with the given tag name does not exist.
+ github-release-by-tag-name: func(repo: string, tag: string) -> result<github-release, string>;
+}
@@ -0,0 +1,67 @@
+interface http-client {
+ /// An HTTP request.
+ record http-request {
+ /// The HTTP method for the request.
+ method: http-method,
+ /// The URL to which the request should be made.
+ url: string,
+ /// The headers for the request.
+ headers: list<tuple<string, string>>,
+ /// The request body.
+ body: option<list<u8>>,
+ /// The policy to use for redirects.
+ redirect-policy: redirect-policy,
+ }
+
+ /// HTTP methods.
+ enum http-method {
+ /// `GET`
+ get,
+ /// `HEAD`
+ head,
+ /// `POST`
+ post,
+ /// `PUT`
+ put,
+ /// `DELETE`
+ delete,
+ /// `OPTIONS`
+ options,
+ /// `PATCH`
+ patch,
+ }
+
+ /// The policy for dealing with redirects received from the server.
+ variant redirect-policy {
+ /// Redirects from the server will not be followed.
+ ///
+ /// This is the default behavior.
+ no-follow,
+ /// Redirects from the server will be followed up to the specified limit.
+ follow-limit(u32),
+ /// All redirects from the server will be followed.
+ follow-all,
+ }
+
+ /// An HTTP response.
+ record http-response {
+ /// The response headers.
+ headers: list<tuple<string, string>>,
+ /// The response body.
+ body: list<u8>,
+ }
+
+ /// Performs an HTTP request and returns the response.
+ fetch: func(req: http-request) -> result<http-response, string>;
+
+ /// An HTTP response stream.
+ resource http-response-stream {
+ /// Retrieves the next chunk of data from the response stream.
+ ///
+ /// Returns `Ok(None)` if the stream has ended.
+ next-chunk: func() -> result<option<list<u8>>, string>;
+ }
+
+ /// Performs an HTTP request and returns a response stream.
+ fetch-stream: func(req: http-request) -> result<http-response-stream, string>;
+}
@@ -0,0 +1,255 @@
+interface llm-provider {
+ /// Information about a language model provider.
+ record provider-info {
+    /// Unique identifier for the provider within the extension (e.g., "my-provider"); Zed prefixes it with the extension id.
+ id: string,
+ /// Display name for the provider.
+ name: string,
+ /// Icon name from Zed's icon set (optional).
+ icon: option<string>,
+ }
+
+ /// Capabilities of a language model.
+ record model-capabilities {
+ /// Whether the model supports image inputs.
+ supports-images: bool,
+ /// Whether the model supports tool/function calling.
+ supports-tools: bool,
+ /// Whether the model supports the "auto" tool choice.
+ supports-tool-choice-auto: bool,
+ /// Whether the model supports the "any" tool choice.
+ supports-tool-choice-any: bool,
+ /// Whether the model supports the "none" tool choice.
+ supports-tool-choice-none: bool,
+ /// Whether the model supports extended thinking/reasoning.
+ supports-thinking: bool,
+ /// The format for tool input schemas.
+ tool-input-format: tool-input-format,
+ }
+
+ /// Format for tool input schemas.
+ enum tool-input-format {
+ /// Standard JSON Schema format.
+ json-schema,
+ /// Simplified schema format for certain providers.
+ simplified,
+ }
+
+ /// Information about a specific model.
+ record model-info {
+ /// Unique identifier for the model.
+ id: string,
+ /// Display name for the model.
+ name: string,
+ /// Maximum input token count.
+ max-token-count: u64,
+ /// Maximum output tokens (optional).
+ max-output-tokens: option<u64>,
+ /// Model capabilities.
+ capabilities: model-capabilities,
+ /// Whether this is the default model for the provider.
+ is-default: bool,
+ /// Whether this is the default fast model.
+ is-default-fast: bool,
+ }
+
+ /// The role of a message participant.
+ enum message-role {
+ /// User message.
+ user,
+ /// Assistant message.
+ assistant,
+ /// System message.
+ system,
+ }
+
+ /// A message in a completion request.
+ record request-message {
+ /// The role of the message sender.
+ role: message-role,
+ /// The content of the message.
+ content: list<message-content>,
+ /// Whether to cache this message for prompt caching.
+ cache: bool,
+ }
+
+ /// Content within a message.
+ variant message-content {
+ /// Plain text content.
+ text(string),
+ /// Image content.
+ image(image-data),
+ /// A tool use request from the assistant.
+ tool-use(tool-use),
+ /// A tool result from the user.
+ tool-result(tool-result),
+ /// Thinking/reasoning content.
+ thinking(thinking-content),
+ /// Redacted/encrypted thinking content.
+ redacted-thinking(string),
+ }
+
+ /// Image data for vision models.
+ record image-data {
+ /// Base64-encoded image data.
+ source: string,
+ /// Image width in pixels (optional).
+ width: option<u32>,
+ /// Image height in pixels (optional).
+ height: option<u32>,
+ }
+
+ /// A tool use request from the model.
+ record tool-use {
+ /// Unique identifier for this tool use.
+ id: string,
+ /// The name of the tool being used.
+ name: string,
+ /// JSON string of the tool input arguments.
+ input: string,
+ /// Thought signature for providers that support it (e.g., Anthropic).
+ thought-signature: option<string>,
+ }
+
+ /// A tool result to send back to the model.
+ record tool-result {
+ /// The ID of the tool use this is a result for.
+ tool-use-id: string,
+ /// The name of the tool.
+ tool-name: string,
+ /// Whether this result represents an error.
+ is-error: bool,
+ /// The content of the result.
+ content: tool-result-content,
+ }
+
+ /// Content of a tool result.
+ variant tool-result-content {
+ /// Text result.
+ text(string),
+ /// Image result.
+ image(image-data),
+ }
+
+ /// Thinking/reasoning content from models that support extended thinking.
+ record thinking-content {
+ /// The thinking text.
+ text: string,
+ /// Signature for the thinking block (provider-specific).
+ signature: option<string>,
+ }
+
+ /// A tool definition for function calling.
+ record tool-definition {
+ /// The name of the tool.
+ name: string,
+ /// Description of what the tool does.
+ description: string,
+ /// JSON Schema for input parameters.
+ input-schema: string,
+ }
+
+ /// Tool choice preference for the model.
+ enum tool-choice {
+ /// Let the model decide whether to use tools.
+ auto,
+ /// Force the model to use at least one tool.
+ any,
+ /// Prevent the model from using tools.
+ none,
+ }
+
+ /// A completion request to send to the model.
+ record completion-request {
+ /// The messages in the conversation.
+ messages: list<request-message>,
+ /// Available tools for the model to use.
+ tools: list<tool-definition>,
+ /// Tool choice preference.
+ tool-choice: option<tool-choice>,
+ /// Stop sequences to end generation.
+ stop-sequences: list<string>,
+ /// Temperature for sampling (0.0-1.0).
+ temperature: option<f32>,
+ /// Whether thinking/reasoning is allowed.
+ thinking-allowed: bool,
+ /// Maximum tokens to generate.
+ max-tokens: option<u64>,
+ }
+
+ /// Events emitted during completion streaming.
+ variant completion-event {
+ /// Completion has started.
+ started,
+ /// Text content chunk.
+ text(string),
+ /// Thinking/reasoning content chunk.
+ thinking(thinking-content),
+ /// Redacted thinking (encrypted) chunk.
+ redacted-thinking(string),
+ /// Tool use request from the model.
+ tool-use(tool-use),
+ /// JSON parse error when parsing tool input.
+ tool-use-json-parse-error(tool-use-json-parse-error),
+ /// Completion stopped.
+ stop(stop-reason),
+ /// Token usage update.
+ usage(token-usage),
+ /// Reasoning details (provider-specific JSON).
+ reasoning-details(string),
+ }
+
+ /// Error information when tool use JSON parsing fails.
+ record tool-use-json-parse-error {
+ /// The tool use ID.
+ id: string,
+ /// The tool name.
+ tool-name: string,
+ /// The raw input that failed to parse.
+ raw-input: string,
+ /// The parse error message.
+ error: string,
+ }
+
+ /// Reason the completion stopped.
+ enum stop-reason {
+ /// The model finished generating.
+ end-turn,
+ /// Maximum tokens reached.
+ max-tokens,
+ /// The model wants to use a tool.
+ tool-use,
+ /// The model refused to respond.
+ refusal,
+ }
+
+ /// Token usage statistics.
+ record token-usage {
+ /// Number of input tokens used.
+ input-tokens: u64,
+ /// Number of output tokens generated.
+ output-tokens: u64,
+ /// Tokens used for cache creation (if supported).
+ cache-creation-input-tokens: option<u64>,
+ /// Tokens read from cache (if supported).
+ cache-read-input-tokens: option<u64>,
+ }
+
+ /// Credential types that can be requested.
+ enum credential-type {
+ /// An API key.
+ api-key,
+ /// An OAuth token.
+ oauth-token,
+ }
+
+ /// Cache configuration for prompt caching.
+ record cache-configuration {
+ /// Maximum number of cache anchors.
+ max-cache-anchors: u32,
+ /// Whether caching should be applied to tool definitions.
+ should-cache-tool-definitions: bool,
+    /// Minimum total token count required before caching is applied.
+ min-total-token-count: u64,
+ }
+}
@@ -0,0 +1,90 @@
+interface lsp {
+ /// An LSP completion.
+ record completion {
+ label: string,
+ label-details: option<completion-label-details>,
+ detail: option<string>,
+ kind: option<completion-kind>,
+ insert-text-format: option<insert-text-format>,
+ }
+
+ /// The kind of an LSP completion.
+ variant completion-kind {
+ text,
+ method,
+ function,
+ %constructor,
+ field,
+ variable,
+ class,
+ %interface,
+ module,
+ property,
+ unit,
+ value,
+ %enum,
+ keyword,
+ snippet,
+ color,
+ file,
+ reference,
+ folder,
+ enum-member,
+ constant,
+ struct,
+ event,
+ operator,
+ type-parameter,
+ other(s32),
+ }
+
+ /// Label details for an LSP completion.
+ record completion-label-details {
+ detail: option<string>,
+ description: option<string>,
+ }
+
+ /// Defines how to interpret the insert text in a completion item.
+ variant insert-text-format {
+ plain-text,
+ snippet,
+ other(s32),
+ }
+
+ /// An LSP symbol.
+ record symbol {
+ kind: symbol-kind,
+ name: string,
+ }
+
+ /// The kind of an LSP symbol.
+ variant symbol-kind {
+ file,
+ module,
+ namespace,
+ %package,
+ class,
+ method,
+ property,
+ field,
+ %constructor,
+ %enum,
+ %interface,
+ function,
+ variable,
+ constant,
+ %string,
+ number,
+ boolean,
+ array,
+ object,
+ key,
+ null,
+ enum-member,
+ struct,
+ event,
+ operator,
+ type-parameter,
+ other(s32),
+ }
+}
@@ -0,0 +1,13 @@
+interface nodejs {
+ /// Returns the path to the Node binary used by Zed.
+ node-binary-path: func() -> result<string, string>;
+
+ /// Returns the latest version of the given NPM package.
+ npm-package-latest-version: func(package-name: string) -> result<string, string>;
+
+ /// Returns the installed version of the given NPM package, if it exists.
+ npm-package-installed-version: func(package-name: string) -> result<option<string>, string>;
+
+ /// Installs the specified NPM package.
+ npm-install-package: func(package-name: string, version: string) -> result<_, string>;
+}
@@ -0,0 +1,24 @@
+interface platform {
+ /// An operating system.
+ enum os {
+ /// macOS.
+ mac,
+ /// Linux.
+ linux,
+ /// Windows.
+ windows,
+ }
+
+ /// A platform architecture.
+ enum architecture {
+ /// AArch64 (e.g., Apple Silicon).
+ aarch64,
+ /// x86.
+ x86,
+ /// x86-64.
+ x8664,
+ }
+
+ /// Gets the current operating system and architecture.
+ current-platform: func() -> tuple<os, architecture>;
+}
@@ -0,0 +1,29 @@
+interface process {
+ use common.{env-vars};
+
+ /// A command.
+ record command {
+ /// The command to execute.
+ command: string,
+ /// The arguments to pass to the command.
+ args: list<string>,
+ /// The environment variables to set for the command.
+ env: env-vars,
+ }
+
+ /// The output of a finished process.
+ record output {
+ /// The status (exit code) of the process.
+ ///
+ /// On Unix, this will be `None` if the process was terminated by a signal.
+ status: option<s32>,
+ /// The data that the process wrote to stdout.
+ stdout: list<u8>,
+ /// The data that the process wrote to stderr.
+ stderr: list<u8>,
+ }
+
+ /// Executes the given command as a child process, waiting for it to finish
+ /// and collecting all of its output.
+ run-command: func(command: command) -> result<output, string>;
+}
@@ -0,0 +1,40 @@
+use serde::{Deserialize, Serialize};
+use std::{collections::HashMap, num::NonZeroU32};
+
+/// The settings for a particular language.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct LanguageSettings {
+ /// How many columns a tab should occupy.
+ pub tab_size: NonZeroU32,
+}
+
+/// The settings for a particular language server.
+#[derive(Default, Debug, Serialize, Deserialize)]
+pub struct LspSettings {
+ /// The settings for the language server binary.
+ pub binary: Option<CommandSettings>,
+ /// The initialization options to pass to the language server.
+ pub initialization_options: Option<serde_json::Value>,
+    /// The settings to pass to the language server.
+ pub settings: Option<serde_json::Value>,
+}
+
+/// The settings for a particular context server.
+#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct ContextServerSettings {
+ /// The settings for the context server binary.
+ pub command: Option<CommandSettings>,
+ /// The settings to pass to the context server.
+ pub settings: Option<serde_json::Value>,
+}
+
+/// The settings for a command.
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct CommandSettings {
+ /// The path to the command.
+ pub path: Option<String>,
+ /// The arguments to pass to the command.
+ pub arguments: Option<Vec<String>>,
+ /// The environment variables.
+ pub env: Option<HashMap<String, String>>,
+}
@@ -0,0 +1,41 @@
+interface slash-command {
+ use common.{range};
+
+ /// A slash command for use in the Assistant.
+ record slash-command {
+ /// The name of the slash command.
+ name: string,
+ /// The description of the slash command.
+ description: string,
+ /// The tooltip text to display for the run button.
+ tooltip-text: string,
+ /// Whether this slash command requires an argument.
+ requires-argument: bool,
+ }
+
+ /// The output of a slash command.
+ record slash-command-output {
+ /// The text produced by the slash command.
+ text: string,
+ /// The list of sections to show in the slash command placeholder.
+ sections: list<slash-command-output-section>,
+ }
+
+ /// A section in the slash command output.
+ record slash-command-output-section {
+ /// The range this section occupies.
+ range: range,
+ /// The label to display in the placeholder for this section.
+ label: string,
+ }
+
+ /// A completion for a slash command argument.
+ record slash-command-argument-completion {
+ /// The label to display for this completion.
+ label: string,
+ /// The new text that should be inserted into the command when this completion is accepted.
+ new-text: string,
+ /// Whether the command should be run when accepting this completion.
+ run-command: bool,
+ }
+}
@@ -30,6 +30,7 @@ gpui.workspace = true
gpui_tokio.workspace = true
http_client.workspace = true
language.workspace = true
+language_model.workspace = true
log.workspace = true
lsp.workspace = true
moka.workspace = true
@@ -47,6 +48,7 @@ task.workspace = true
telemetry.workspace = true
tempfile.workspace = true
toml.workspace = true
+ui.workspace = true
url.workspace = true
util.workspace = true
wasmparser.workspace = true
@@ -143,6 +143,7 @@ fn manifest() -> ExtensionManifest {
)],
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: BTreeMap::default(),
}
}
@@ -113,6 +113,7 @@ mod tests {
capabilities: vec![],
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: BTreeMap::default(),
}
}
@@ -16,9 +16,9 @@ pub use extension::ExtensionManifest;
use extension::extension_builder::{CompileExtensionOptions, ExtensionBuilder};
use extension::{
ExtensionContextServerProxy, ExtensionDebugAdapterProviderProxy, ExtensionEvents,
- ExtensionGrammarProxy, ExtensionHostProxy, ExtensionLanguageProxy,
- ExtensionLanguageServerProxy, ExtensionSlashCommandProxy, ExtensionSnippetProxy,
- ExtensionThemeProxy,
+ ExtensionGrammarProxy, ExtensionHostProxy, ExtensionLanguageModelProviderProxy,
+ ExtensionLanguageProxy, ExtensionLanguageServerProxy, ExtensionSlashCommandProxy,
+ ExtensionSnippetProxy, ExtensionThemeProxy,
};
use fs::{Fs, RemoveOptions};
use futures::future::join_all;
@@ -57,9 +57,10 @@ use std::{
};
use url::Url;
use util::{ResultExt, paths::RemotePathBuf};
+use wasm_host::llm_provider::ExtensionLanguageModelProvider;
use wasm_host::{
WasmExtension, WasmHost,
- wit::{is_supported_wasm_api_version, wasm_api_version_range},
+ wit::{LlmModelInfo, LlmProviderInfo, is_supported_wasm_api_version, wasm_api_version_range},
};
pub use extension::{
@@ -1217,6 +1218,11 @@ impl ExtensionStore {
for command_name in extension.manifest.slash_commands.keys() {
self.proxy.unregister_slash_command(command_name.clone());
}
+ for provider_id in extension.manifest.language_model_providers.keys() {
+ let full_provider_id: Arc<str> = format!("{}:{}", extension_id, provider_id).into();
+ self.proxy
+ .unregister_language_model_provider(full_provider_id, cx);
+ }
}
self.wasm_extensions
@@ -1355,7 +1361,11 @@ impl ExtensionStore {
})
.await;
- let mut wasm_extensions = Vec::new();
+ let mut wasm_extensions: Vec<(
+ Arc<ExtensionManifest>,
+ WasmExtension,
+ Vec<(LlmProviderInfo, Vec<LlmModelInfo>)>,
+ )> = Vec::new();
for extension in extension_entries {
if extension.manifest.lib.kind.is_none() {
continue;
@@ -1373,7 +1383,71 @@ impl ExtensionStore {
match wasm_extension {
Ok(wasm_extension) => {
- wasm_extensions.push((extension.manifest.clone(), wasm_extension))
+ // Query for LLM providers if the manifest declares any
+ let mut llm_providers_with_models = Vec::new();
+ if !extension.manifest.language_model_providers.is_empty() {
+ let providers_result = wasm_extension
+ .call(|ext, store| {
+ async move { ext.call_llm_providers(store).await }.boxed()
+ })
+ .await;
+
+ if let Ok(Ok(providers)) = providers_result {
+ for provider_info in providers {
+ let models_result = wasm_extension
+ .call({
+ let provider_id = provider_info.id.clone();
+ |ext, store| {
+ async move {
+ ext.call_llm_provider_models(store, &provider_id)
+ .await
+ }
+ .boxed()
+ }
+ })
+ .await;
+
+ let models: Vec<LlmModelInfo> = match models_result {
+ Ok(Ok(Ok(models))) => models,
+ Ok(Ok(Err(e))) => {
+ log::error!(
+ "Failed to get models for LLM provider {} in extension {}: {}",
+ provider_info.id,
+ extension.manifest.id,
+ e
+ );
+ Vec::new()
+ }
+ Ok(Err(e)) => {
+ log::error!(
+ "Wasm error calling llm_provider_models for {} in extension {}: {:?}",
+ provider_info.id,
+ extension.manifest.id,
+ e
+ );
+ Vec::new()
+ }
+ Err(e) => {
+ log::error!(
+ "Extension call failed for llm_provider_models {} in extension {}: {:?}",
+ provider_info.id,
+ extension.manifest.id,
+ e
+ );
+ Vec::new()
+ }
+ };
+
+ llm_providers_with_models.push((provider_info, models));
+ }
+ }
+ }
+
+ wasm_extensions.push((
+ extension.manifest.clone(),
+ wasm_extension,
+ llm_providers_with_models,
+ ))
}
Err(e) => {
log::error!(
@@ -1392,7 +1466,7 @@ impl ExtensionStore {
this.update(cx, |this, cx| {
this.reload_complete_senders.clear();
- for (manifest, wasm_extension) in &wasm_extensions {
+ for (manifest, wasm_extension, llm_providers_with_models) in &wasm_extensions {
let extension = Arc::new(wasm_extension.clone());
for (language_server_id, language_server_config) in &manifest.language_servers {
@@ -1446,9 +1520,38 @@ impl ExtensionStore {
this.proxy
.register_debug_locator(extension.clone(), debug_adapter.clone());
}
+
+ // Register LLM providers
+ for (provider_info, models) in llm_providers_with_models {
+ let provider_id: Arc<str> =
+ format!("{}:{}", manifest.id, provider_info.id).into();
+ let wasm_ext = wasm_extension.clone();
+ let pinfo = provider_info.clone();
+ let mods = models.clone();
+
+ this.proxy.register_language_model_provider(
+ provider_id,
+ Box::new(move |cx: &mut App| {
+ let provider = Arc::new(ExtensionLanguageModelProvider::new(
+ wasm_ext, pinfo, mods, cx,
+ ));
+ language_model::LanguageModelRegistry::global(cx).update(
+ cx,
+ |registry, cx| {
+ registry.register_provider(provider, cx);
+ },
+ );
+ }),
+ cx,
+ );
+ }
}
- this.wasm_extensions.extend(wasm_extensions);
+ let wasm_extensions_without_llm: Vec<_> = wasm_extensions
+ .into_iter()
+ .map(|(manifest, ext, _)| (manifest, ext))
+ .collect();
+ this.wasm_extensions.extend(wasm_extensions_without_llm);
this.proxy.set_extensions_loaded();
this.proxy.reload_current_theme(cx);
this.proxy.reload_current_icon_theme(cx);
@@ -165,6 +165,7 @@ async fn test_extension_store(cx: &mut TestAppContext) {
capabilities: Vec::new(),
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: BTreeMap::default(),
}),
dev: false,
},
@@ -196,6 +197,7 @@ async fn test_extension_store(cx: &mut TestAppContext) {
capabilities: Vec::new(),
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: BTreeMap::default(),
}),
dev: false,
},
@@ -376,6 +378,7 @@ async fn test_extension_store(cx: &mut TestAppContext) {
capabilities: Vec::new(),
debug_adapters: Default::default(),
debug_locators: Default::default(),
+ language_model_providers: BTreeMap::default(),
}),
dev: false,
},
@@ -1,3 +1,4 @@
+pub mod llm_provider;
pub mod wit;
use crate::capability_granter::CapabilityGranter;
@@ -0,0 +1,624 @@
+use crate::wasm_host::WasmExtension;
+
+use crate::wasm_host::wit::{
+ LlmCompletionEvent, LlmCompletionRequest, LlmImageData, LlmMessageContent, LlmMessageRole,
+ LlmModelInfo, LlmProviderInfo, LlmRequestMessage, LlmStopReason, LlmThinkingContent,
+ LlmToolChoice, LlmToolDefinition, LlmToolInputFormat, LlmToolResult, LlmToolResultContent,
+ LlmToolUse,
+};
+use anyhow::{Result, anyhow};
+use futures::future::BoxFuture;
+use futures::stream::BoxStream;
+use futures::{FutureExt, StreamExt};
+use gpui::{AnyView, App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, Task, Window};
+use language_model::tool_schema::LanguageModelToolSchemaFormat;
+use language_model::{
+ AuthenticateError, ConfigurationViewTargetAgent, LanguageModel,
+ LanguageModelCacheConfiguration, LanguageModelCompletionError, LanguageModelCompletionEvent,
+ LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
+ LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
+ LanguageModelToolChoice, LanguageModelToolUse, LanguageModelToolUseId, StopReason, TokenUsage,
+};
+use std::sync::Arc;
+
+/// An extension-based language model provider.
+pub struct ExtensionLanguageModelProvider {
+ pub extension: WasmExtension,
+ pub provider_info: LlmProviderInfo,
+ state: Entity<ExtensionLlmProviderState>,
+}
+
+pub struct ExtensionLlmProviderState {
+ is_authenticated: bool,
+ available_models: Vec<LlmModelInfo>,
+}
+
+impl EventEmitter<()> for ExtensionLlmProviderState {}
+
+impl ExtensionLanguageModelProvider {
+ pub fn new(
+ extension: WasmExtension,
+ provider_info: LlmProviderInfo,
+ models: Vec<LlmModelInfo>,
+ cx: &mut App,
+ ) -> Self {
+ let state = cx.new(|_| ExtensionLlmProviderState {
+ is_authenticated: false,
+ available_models: models,
+ });
+
+ Self {
+ extension,
+ provider_info,
+ state,
+ }
+ }
+
+ fn provider_id_string(&self) -> String {
+ format!("{}:{}", self.extension.manifest.id, self.provider_info.id)
+ }
+}
+
+impl LanguageModelProvider for ExtensionLanguageModelProvider {
+ fn id(&self) -> LanguageModelProviderId {
+ LanguageModelProviderId::from(self.provider_id_string())
+ }
+
+ fn name(&self) -> LanguageModelProviderName {
+ LanguageModelProviderName::from(self.provider_info.name.clone())
+ }
+
+ fn icon(&self) -> ui::IconName {
+ ui::IconName::ZedAssistant
+ }
+
+ fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
+ let state = self.state.read(cx);
+ state
+ .available_models
+ .iter()
+ .find(|m| m.is_default)
+ .or_else(|| state.available_models.first())
+ .map(|model_info| {
+ Arc::new(ExtensionLanguageModel {
+ extension: self.extension.clone(),
+ model_info: model_info.clone(),
+ provider_id: self.id(),
+ provider_name: self.name(),
+ provider_info: self.provider_info.clone(),
+ }) as Arc<dyn LanguageModel>
+ })
+ }
+
+ fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
+ let state = self.state.read(cx);
+ state
+ .available_models
+ .iter()
+ .find(|m| m.is_default_fast)
+ .or_else(|| state.available_models.iter().find(|m| m.is_default))
+ .or_else(|| state.available_models.first())
+ .map(|model_info| {
+ Arc::new(ExtensionLanguageModel {
+ extension: self.extension.clone(),
+ model_info: model_info.clone(),
+ provider_id: self.id(),
+ provider_name: self.name(),
+ provider_info: self.provider_info.clone(),
+ }) as Arc<dyn LanguageModel>
+ })
+ }
+
+ fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
+ let state = self.state.read(cx);
+ state
+ .available_models
+ .iter()
+ .map(|model_info| {
+ Arc::new(ExtensionLanguageModel {
+ extension: self.extension.clone(),
+ model_info: model_info.clone(),
+ provider_id: self.id(),
+ provider_name: self.name(),
+ provider_info: self.provider_info.clone(),
+ }) as Arc<dyn LanguageModel>
+ })
+ .collect()
+ }
+
+ fn is_authenticated(&self, cx: &App) -> bool {
+ self.state.read(cx).is_authenticated
+ }
+
+ fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
+ let extension = self.extension.clone();
+ let provider_id = self.provider_info.id.clone();
+ let state = self.state.clone();
+
+ cx.spawn(async move |cx| {
+ let result = extension
+ .call(|extension, store| {
+ async move {
+ extension
+ .call_llm_provider_authenticate(store, &provider_id)
+ .await
+ }
+ .boxed()
+ })
+ .await;
+
+ match result {
+ Ok(Ok(Ok(()))) => {
+ cx.update(|cx| {
+ state.update(cx, |state, _| {
+ state.is_authenticated = true;
+ });
+ })?;
+ Ok(())
+ }
+ Ok(Ok(Err(e))) => Err(AuthenticateError::Other(anyhow!("{}", e))),
+ Ok(Err(e)) => Err(AuthenticateError::Other(e)),
+ Err(e) => Err(AuthenticateError::Other(e)),
+ }
+ })
+ }
+
+ fn configuration_view(
+ &self,
+ _target_agent: ConfigurationViewTargetAgent,
+ _window: &mut Window,
+ cx: &mut App,
+ ) -> AnyView {
+ cx.new(|_| EmptyConfigView).into()
+ }
+
+ fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
+ let extension = self.extension.clone();
+ let provider_id = self.provider_info.id.clone();
+ let state = self.state.clone();
+
+ cx.spawn(async move |cx| {
+ let result = extension
+ .call(|extension, store| {
+ async move {
+ extension
+ .call_llm_provider_reset_credentials(store, &provider_id)
+ .await
+ }
+ .boxed()
+ })
+ .await;
+
+ match result {
+ Ok(Ok(Ok(()))) => {
+ cx.update(|cx| {
+ state.update(cx, |state, _| {
+ state.is_authenticated = false;
+ });
+ })?;
+ Ok(())
+ }
+ Ok(Ok(Err(e))) => Err(anyhow!("{}", e)),
+ Ok(Err(e)) => Err(e),
+ Err(e) => Err(e),
+ }
+ })
+ }
+}
+
+impl LanguageModelProviderState for ExtensionLanguageModelProvider {
+ type ObservableEntity = ExtensionLlmProviderState;
+
+ fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
+ Some(self.state.clone())
+ }
+
+ fn subscribe<T: 'static>(
+ &self,
+ cx: &mut Context<T>,
+ callback: impl Fn(&mut T, &mut Context<T>) + 'static,
+ ) -> Option<gpui::Subscription> {
+ Some(cx.subscribe(&self.state, move |this, _, _, cx| callback(this, cx)))
+ }
+}
+
+struct EmptyConfigView;
+
+impl gpui::Render for EmptyConfigView {
+ fn render(
+ &mut self,
+ _window: &mut Window,
+ _cx: &mut gpui::Context<Self>,
+ ) -> impl gpui::IntoElement {
+ gpui::Empty
+ }
+}
+
+/// An extension-based language model.
+pub struct ExtensionLanguageModel {
+ extension: WasmExtension,
+ model_info: LlmModelInfo,
+ provider_id: LanguageModelProviderId,
+ provider_name: LanguageModelProviderName,
+ provider_info: LlmProviderInfo,
+}
+
+impl LanguageModel for ExtensionLanguageModel {
+ fn id(&self) -> LanguageModelId {
+ LanguageModelId::from(format!("{}:{}", self.provider_id.0, self.model_info.id))
+ }
+
+ fn name(&self) -> LanguageModelName {
+ LanguageModelName::from(self.model_info.name.clone())
+ }
+
+ fn provider_id(&self) -> LanguageModelProviderId {
+ self.provider_id.clone()
+ }
+
+ fn provider_name(&self) -> LanguageModelProviderName {
+ self.provider_name.clone()
+ }
+
+ fn telemetry_id(&self) -> String {
+ format!("extension:{}", self.model_info.id)
+ }
+
+ fn supports_images(&self) -> bool {
+ self.model_info.capabilities.supports_images
+ }
+
+ fn supports_tools(&self) -> bool {
+ self.model_info.capabilities.supports_tools
+ }
+
+ fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
+ match choice {
+ LanguageModelToolChoice::Auto => self.model_info.capabilities.supports_tool_choice_auto,
+ LanguageModelToolChoice::Any => self.model_info.capabilities.supports_tool_choice_any,
+ LanguageModelToolChoice::None => self.model_info.capabilities.supports_tool_choice_none,
+ }
+ }
+
+ fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
+ match self.model_info.capabilities.tool_input_format {
+ LlmToolInputFormat::JsonSchema => LanguageModelToolSchemaFormat::JsonSchema,
+ LlmToolInputFormat::Simplified => LanguageModelToolSchemaFormat::JsonSchema,
+ }
+ }
+
+ fn max_token_count(&self) -> u64 {
+ self.model_info.max_token_count
+ }
+
+ fn max_output_tokens(&self) -> Option<u64> {
+ self.model_info.max_output_tokens
+ }
+
+ fn count_tokens(
+ &self,
+ request: LanguageModelRequest,
+ _cx: &App,
+ ) -> BoxFuture<'static, Result<u64>> {
+ let extension = self.extension.clone();
+ let provider_id = self.provider_info.id.clone();
+ let model_id = self.model_info.id.clone();
+
+ async move {
+ let wit_request = convert_request_to_wit(&request);
+
+ let result = extension
+ .call(|ext, store| {
+ async move {
+ ext.call_llm_count_tokens(store, &provider_id, &model_id, &wit_request)
+ .await
+ }
+ .boxed()
+ })
+ .await?;
+
+ match result {
+ Ok(Ok(count)) => Ok(count),
+ Ok(Err(e)) => Err(anyhow!("{}", e)),
+ Err(e) => Err(e),
+ }
+ }
+ .boxed()
+ }
+
+ fn stream_completion(
+ &self,
+ request: LanguageModelRequest,
+ _cx: &AsyncApp,
+ ) -> BoxFuture<
+ 'static,
+ Result<
+ BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
+ LanguageModelCompletionError,
+ >,
+ > {
+ let extension = self.extension.clone();
+ let provider_id = self.provider_info.id.clone();
+ let model_id = self.model_info.id.clone();
+
+ async move {
+ let wit_request = convert_request_to_wit(&request);
+
+ // Start the stream and get a stream ID
+ let outer_result = extension
+ .call(|ext, store| {
+ async move {
+ ext.call_llm_stream_completion_start(
+ store,
+ &provider_id,
+ &model_id,
+ &wit_request,
+ )
+ .await
+ }
+ .boxed()
+ })
+ .await
+ .map_err(|e| LanguageModelCompletionError::Other(e))?;
+
+ // Unwrap the inner Result<Result<String, String>>
+ let inner_result =
+ outer_result.map_err(|e| LanguageModelCompletionError::Other(anyhow!("{}", e)))?;
+
+ // Get the stream ID
+ let stream_id =
+ inner_result.map_err(|e| LanguageModelCompletionError::Other(anyhow!("{}", e)))?;
+
+ // Create a stream that polls for events
+ let stream = futures::stream::unfold(
+ (extension, stream_id, false),
+ |(ext, stream_id, done)| async move {
+ if done {
+ return None;
+ }
+
+ let result = ext
+ .call({
+ let stream_id = stream_id.clone();
+ move |ext, store| {
+ async move {
+ ext.call_llm_stream_completion_next(store, &stream_id).await
+ }
+ .boxed()
+ }
+ })
+ .await;
+
+ match result {
+ Ok(Ok(Ok(Some(event)))) => {
+ let converted = convert_completion_event(event);
+ Some((Ok(converted), (ext, stream_id, false)))
+ }
+ Ok(Ok(Ok(None))) => {
+ // Stream complete - close it
+ let _ = ext
+ .call({
+ let stream_id = stream_id.clone();
+ move |ext, store| {
+ async move {
+ ext.call_llm_stream_completion_close(store, &stream_id)
+ .await
+ }
+ .boxed()
+ }
+ })
+ .await;
+ None
+ }
+ Ok(Ok(Err(e))) => {
+ // Extension returned an error - close stream and return error
+ let _ = ext
+ .call({
+ let stream_id = stream_id.clone();
+ move |ext, store| {
+ async move {
+ ext.call_llm_stream_completion_close(store, &stream_id)
+ .await
+ }
+ .boxed()
+ }
+ })
+ .await;
+ Some((
+ Err(LanguageModelCompletionError::Other(anyhow!("{}", e))),
+ (ext, stream_id, true),
+ ))
+ }
+ Ok(Err(e)) => {
+ // WASM call error - close stream and return error
+ let _ = ext
+ .call({
+ let stream_id = stream_id.clone();
+ move |ext, store| {
+ async move {
+ ext.call_llm_stream_completion_close(store, &stream_id)
+ .await
+ }
+ .boxed()
+ }
+ })
+ .await;
+ Some((
+ Err(LanguageModelCompletionError::Other(e)),
+ (ext, stream_id, true),
+ ))
+ }
+ Err(e) => {
+ // Channel error - close stream and return error
+ let _ = ext
+ .call({
+ let stream_id = stream_id.clone();
+ move |ext, store| {
+ async move {
+ ext.call_llm_stream_completion_close(store, &stream_id)
+ .await
+ }
+ .boxed()
+ }
+ })
+ .await;
+ Some((
+ Err(LanguageModelCompletionError::Other(e)),
+ (ext, stream_id, true),
+ ))
+ }
+ }
+ },
+ );
+
+ Ok(stream.boxed())
+ }
+ .boxed()
+ }
+
+ fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
+ None
+ }
+}
+
+fn convert_request_to_wit(request: &LanguageModelRequest) -> LlmCompletionRequest {
+ let messages = request
+ .messages
+ .iter()
+ .map(|msg| LlmRequestMessage {
+ role: match msg.role {
+ language_model::Role::User => LlmMessageRole::User,
+ language_model::Role::Assistant => LlmMessageRole::Assistant,
+ language_model::Role::System => LlmMessageRole::System,
+ },
+ content: msg
+ .content
+ .iter()
+ .map(|content| match content {
+ language_model::MessageContent::Text(text) => {
+ LlmMessageContent::Text(text.clone())
+ }
+ language_model::MessageContent::Image(image) => {
+ LlmMessageContent::Image(LlmImageData {
+ source: image.source.to_string(),
+ width: Some(image.size.width.0 as u32),
+ height: Some(image.size.height.0 as u32),
+ })
+ }
+ language_model::MessageContent::ToolUse(tool_use) => {
+ LlmMessageContent::ToolUse(LlmToolUse {
+ id: tool_use.id.to_string(),
+ name: tool_use.name.to_string(),
+ input: tool_use.raw_input.clone(),
+ thought_signature: tool_use.thought_signature.clone(),
+ })
+ }
+ language_model::MessageContent::ToolResult(result) => {
+ LlmMessageContent::ToolResult(LlmToolResult {
+ tool_use_id: result.tool_use_id.to_string(),
+ tool_name: result.tool_name.to_string(),
+ is_error: result.is_error,
+ content: match &result.content {
+ language_model::LanguageModelToolResultContent::Text(t) => {
+ LlmToolResultContent::Text(t.to_string())
+ }
+ language_model::LanguageModelToolResultContent::Image(img) => {
+ LlmToolResultContent::Image(LlmImageData {
+ source: img.source.to_string(),
+ width: Some(img.size.width.0 as u32),
+ height: Some(img.size.height.0 as u32),
+ })
+ }
+ },
+ })
+ }
+ language_model::MessageContent::Thinking { text, signature } => {
+ LlmMessageContent::Thinking(LlmThinkingContent {
+ text: text.clone(),
+ signature: signature.clone(),
+ })
+ }
+ language_model::MessageContent::RedactedThinking(data) => {
+ LlmMessageContent::RedactedThinking(data.clone())
+ }
+ })
+ .collect(),
+ cache: msg.cache,
+ })
+ .collect();
+
+ let tools = request
+ .tools
+ .iter()
+ .map(|tool| LlmToolDefinition {
+ name: tool.name.clone(),
+ description: tool.description.clone(),
+ input_schema: serde_json::to_string(&tool.input_schema).unwrap_or_default(),
+ })
+ .collect();
+
+ let tool_choice = request.tool_choice.as_ref().map(|choice| match choice {
+ LanguageModelToolChoice::Auto => LlmToolChoice::Auto,
+ LanguageModelToolChoice::Any => LlmToolChoice::Any,
+ LanguageModelToolChoice::None => LlmToolChoice::None,
+ });
+
+ LlmCompletionRequest {
+ messages,
+ tools,
+ tool_choice,
+ stop_sequences: request.stop.clone(),
+ temperature: request.temperature,
+ thinking_allowed: request.thinking_allowed,
+ max_tokens: None,
+ }
+}
+
+fn convert_completion_event(event: LlmCompletionEvent) -> LanguageModelCompletionEvent {
+ match event {
+ LlmCompletionEvent::Started => LanguageModelCompletionEvent::Started,
+ LlmCompletionEvent::Text(text) => LanguageModelCompletionEvent::Text(text),
+ LlmCompletionEvent::Thinking(thinking) => LanguageModelCompletionEvent::Thinking {
+ text: thinking.text,
+ signature: thinking.signature,
+ },
+ LlmCompletionEvent::RedactedThinking(data) => {
+ LanguageModelCompletionEvent::RedactedThinking { data }
+ }
+ LlmCompletionEvent::ToolUse(tool_use) => {
+ LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
+ id: LanguageModelToolUseId::from(tool_use.id),
+ name: tool_use.name.into(),
+ raw_input: tool_use.input.clone(),
+ input: serde_json::from_str(&tool_use.input).unwrap_or(serde_json::Value::Null),
+ is_input_complete: true,
+ thought_signature: tool_use.thought_signature,
+ })
+ }
+ LlmCompletionEvent::ToolUseJsonParseError(error) => {
+ LanguageModelCompletionEvent::ToolUseJsonParseError {
+ id: LanguageModelToolUseId::from(error.id),
+ tool_name: error.tool_name.into(),
+ raw_input: error.raw_input.into(),
+ json_parse_error: error.error,
+ }
+ }
+ LlmCompletionEvent::Stop(reason) => LanguageModelCompletionEvent::Stop(match reason {
+ LlmStopReason::EndTurn => StopReason::EndTurn,
+ LlmStopReason::MaxTokens => StopReason::MaxTokens,
+ LlmStopReason::ToolUse => StopReason::ToolUse,
+ LlmStopReason::Refusal => StopReason::Refusal,
+ }),
+ LlmCompletionEvent::Usage(usage) => LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
+ input_tokens: usage.input_tokens,
+ output_tokens: usage.output_tokens,
+ cache_creation_input_tokens: usage.cache_creation_input_tokens.unwrap_or(0),
+ cache_read_input_tokens: usage.cache_read_input_tokens.unwrap_or(0),
+ }),
+ LlmCompletionEvent::ReasoningDetails(json) => {
+ LanguageModelCompletionEvent::ReasoningDetails(
+ serde_json::from_str(&json).unwrap_or(serde_json::Value::Null),
+ )
+ }
+ }
+}
@@ -7,6 +7,7 @@ mod since_v0_3_0;
mod since_v0_4_0;
mod since_v0_5_0;
mod since_v0_6_0;
+mod since_v0_7_0;
use dap::DebugRequest;
use extension::{DebugTaskDefinition, KeyValueStoreDelegate, WorktreeDelegate};
use gpui::BackgroundExecutor;
@@ -15,12 +16,12 @@ use lsp::LanguageServerName;
use release_channel::ReleaseChannel;
use task::{DebugScenario, SpawnInTerminal, TaskTemplate, ZedDebugConfig};
-use crate::wasm_host::wit::since_v0_6_0::dap::StartDebuggingRequestArgumentsRequest;
+use crate::wasm_host::wit::since_v0_7_0::dap::StartDebuggingRequestArgumentsRequest;
use super::{WasmState, wasm_engine};
use anyhow::{Context as _, Result, anyhow};
use semver::Version;
-use since_v0_6_0 as latest;
+use since_v0_7_0 as latest;
use std::{ops::RangeInclusive, path::PathBuf, sync::Arc};
use wasmtime::{
Store,
@@ -32,6 +33,19 @@ pub use latest::CodeLabelSpanLiteral;
pub use latest::{
CodeLabel, CodeLabelSpan, Command, DebugAdapterBinary, ExtensionProject, Range, SlashCommand,
zed::extension::context_server::ContextServerConfiguration,
+ zed::extension::llm_provider::{
+ CacheConfiguration as LlmCacheConfiguration, CompletionEvent as LlmCompletionEvent,
+ CompletionRequest as LlmCompletionRequest, CredentialType as LlmCredentialType,
+ ImageData as LlmImageData, MessageContent as LlmMessageContent,
+ MessageRole as LlmMessageRole, ModelCapabilities as LlmModelCapabilities,
+ ModelInfo as LlmModelInfo, ProviderInfo as LlmProviderInfo,
+ RequestMessage as LlmRequestMessage, StopReason as LlmStopReason,
+ ThinkingContent as LlmThinkingContent, TokenUsage as LlmTokenUsage,
+ ToolChoice as LlmToolChoice, ToolDefinition as LlmToolDefinition,
+ ToolInputFormat as LlmToolInputFormat, ToolResult as LlmToolResult,
+ ToolResultContent as LlmToolResultContent, ToolUse as LlmToolUse,
+ ToolUseJsonParseError as LlmToolUseJsonParseError,
+ },
zed::extension::lsp::{
Completion, CompletionKind, CompletionLabelDetails, InsertTextFormat, Symbol, SymbolKind,
},
@@ -95,6 +109,7 @@ pub fn authorize_access_to_unreleased_wasm_api_version(
}
pub enum Extension {
+ V0_7_0(since_v0_7_0::Extension),
V0_6_0(since_v0_6_0::Extension),
V0_5_0(since_v0_5_0::Extension),
V0_4_0(since_v0_4_0::Extension),
@@ -122,6 +137,15 @@ impl Extension {
latest::Extension::instantiate_async(store, component, latest::linker(executor))
.await
.context("failed to instantiate wasm extension")?;
+ Ok(Self::V0_7_0(extension))
+ } else if version >= since_v0_6_0::MIN_VERSION {
+ let extension = since_v0_6_0::Extension::instantiate_async(
+ store,
+ component,
+ since_v0_6_0::linker(executor),
+ )
+ .await
+ .context("failed to instantiate wasm extension")?;
Ok(Self::V0_6_0(extension))
} else if version >= since_v0_5_0::MIN_VERSION {
let extension = since_v0_5_0::Extension::instantiate_async(
@@ -200,6 +224,7 @@ impl Extension {
pub async fn call_init_extension(&self, store: &mut Store<WasmState>) -> Result<()> {
match self {
+ Extension::V0_7_0(ext) => ext.call_init_extension(store).await,
Extension::V0_6_0(ext) => ext.call_init_extension(store).await,
Extension::V0_5_0(ext) => ext.call_init_extension(store).await,
Extension::V0_4_0(ext) => ext.call_init_extension(store).await,
@@ -220,6 +245,10 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Command, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_language_server_command(store, &language_server_id.0, resource)
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_language_server_command(store, &language_server_id.0, resource)
.await
@@ -282,6 +311,14 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_language_server_initialization_options(
+ store,
+ &language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_language_server_initialization_options(
store,
@@ -371,6 +408,14 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_language_server_workspace_configuration(
+ store,
+ &language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_language_server_workspace_configuration(
store,
@@ -439,6 +484,15 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_language_server_additional_initialization_options(
+ store,
+ &language_server_id.0,
+ &target_language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_language_server_additional_initialization_options(
store,
@@ -483,6 +537,15 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_language_server_additional_workspace_configuration(
+ store,
+ &language_server_id.0,
+ &target_language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_language_server_additional_workspace_configuration(
store,
@@ -526,10 +589,23 @@ impl Extension {
completions: Vec<latest::Completion>,
) -> Result<Result<Vec<Option<CodeLabel>>, String>> {
match self {
- Extension::V0_6_0(ext) => {
+ Extension::V0_7_0(ext) => {
ext.call_labels_for_completions(store, &language_server_id.0, &completions)
.await
}
+ Extension::V0_6_0(ext) => Ok(ext
+ .call_labels_for_completions(
+ store,
+ &language_server_id.0,
+ &completions.into_iter().collect::<Vec<_>>(),
+ )
+ .await?
+ .map(|labels| {
+ labels
+ .into_iter()
+ .map(|label| label.map(Into::into))
+ .collect()
+ })),
Extension::V0_5_0(ext) => Ok(ext
.call_labels_for_completions(
store,
@@ -619,10 +695,23 @@ impl Extension {
symbols: Vec<latest::Symbol>,
) -> Result<Result<Vec<Option<CodeLabel>>, String>> {
match self {
- Extension::V0_6_0(ext) => {
+ Extension::V0_7_0(ext) => {
ext.call_labels_for_symbols(store, &language_server_id.0, &symbols)
.await
}
+ Extension::V0_6_0(ext) => Ok(ext
+ .call_labels_for_symbols(
+ store,
+ &language_server_id.0,
+ &symbols.into_iter().collect::<Vec<_>>(),
+ )
+ .await?
+ .map(|labels| {
+ labels
+ .into_iter()
+ .map(|label| label.map(Into::into))
+ .collect()
+ })),
Extension::V0_5_0(ext) => Ok(ext
.call_labels_for_symbols(
store,
@@ -712,6 +801,10 @@ impl Extension {
arguments: &[String],
) -> Result<Result<Vec<SlashCommandArgumentCompletion>, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_complete_slash_command_argument(store, command, arguments)
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_complete_slash_command_argument(store, command, arguments)
.await
@@ -750,6 +843,10 @@ impl Extension {
resource: Option<Resource<Arc<dyn WorktreeDelegate>>>,
) -> Result<Result<SlashCommandOutput, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_run_slash_command(store, command, arguments, resource)
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_run_slash_command(store, command, arguments, resource)
.await
@@ -787,6 +884,10 @@ impl Extension {
project: Resource<ExtensionProject>,
) -> Result<Result<Command, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_context_server_command(store, &context_server_id, project)
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_context_server_command(store, &context_server_id, project)
.await
@@ -823,6 +924,10 @@ impl Extension {
project: Resource<ExtensionProject>,
) -> Result<Result<Option<ContextServerConfiguration>, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_context_server_configuration(store, &context_server_id, project)
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_context_server_configuration(store, &context_server_id, project)
.await
@@ -849,6 +954,7 @@ impl Extension {
provider: &str,
) -> Result<Result<Vec<String>, String>> {
match self {
+ Extension::V0_7_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
Extension::V0_6_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
Extension::V0_5_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
Extension::V0_4_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
@@ -869,6 +975,10 @@ impl Extension {
kv_store: Resource<Arc<dyn KeyValueStoreDelegate>>,
) -> Result<Result<(), String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_index_docs(store, provider, package_name, kv_store)
+ .await
+ }
Extension::V0_6_0(ext) => {
ext.call_index_docs(store, provider, package_name, kv_store)
.await
@@ -907,6 +1017,20 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<DebugAdapterBinary, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ let dap_binary = ext
+ .call_get_dap_binary(
+ store,
+ &adapter_name,
+ &task.try_into()?,
+ user_installed_path.as_ref().and_then(|p| p.to_str()),
+ resource,
+ )
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(dap_binary))
+ }
Extension::V0_6_0(ext) => {
let dap_binary = ext
.call_get_dap_binary(
@@ -931,6 +1055,16 @@ impl Extension {
config: serde_json::Value,
) -> Result<Result<StartDebuggingRequestArgumentsRequest, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ let config =
+ serde_json::to_string(&config).context("Adapter config is not a valid JSON")?;
+ let dap_binary = ext
+ .call_dap_request_kind(store, &adapter_name, &config)
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(dap_binary))
+ }
Extension::V0_6_0(ext) => {
let config =
serde_json::to_string(&config).context("Adapter config is not a valid JSON")?;
@@ -950,6 +1084,15 @@ impl Extension {
config: ZedDebugConfig,
) -> Result<Result<DebugScenario, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ let config = config.into();
+ let dap_binary = ext
+ .call_dap_config_to_scenario(store, &config)
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(dap_binary.try_into()?))
+ }
Extension::V0_6_0(ext) => {
let config = config.into();
let dap_binary = ext
@@ -971,6 +1114,20 @@ impl Extension {
debug_adapter_name: String,
) -> Result<Option<DebugScenario>> {
match self {
+ Extension::V0_7_0(ext) => {
+ let build_config_template = build_config_template.into();
+ let dap_binary = ext
+ .call_dap_locator_create_scenario(
+ store,
+ &locator_name,
+ &build_config_template,
+ &resolved_label,
+ &debug_adapter_name,
+ )
+ .await?;
+
+ Ok(dap_binary.map(TryInto::try_into).transpose()?)
+ }
Extension::V0_6_0(ext) => {
let build_config_template = build_config_template.into();
let dap_binary = ext
@@ -995,6 +1152,15 @@ impl Extension {
resolved_build_task: SpawnInTerminal,
) -> Result<Result<DebugRequest, String>> {
match self {
+ Extension::V0_7_0(ext) => {
+ let build_config_template = resolved_build_task.try_into()?;
+ let dap_request = ext
+ .call_run_dap_locator(store, &locator_name, &build_config_template)
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(dap_request.into()))
+ }
Extension::V0_6_0(ext) => {
let build_config_template = resolved_build_task.try_into()?;
let dap_request = ext
@@ -1007,6 +1173,139 @@ impl Extension {
_ => anyhow::bail!("`dap_locator_create_scenario` not available prior to v0.6.0"),
}
}
+
+ // =========================================================================
+ // LLM Provider Methods (v0.7.0+)
+ // =========================================================================
+
+ pub async fn call_llm_providers(
+ &self,
+ store: &mut Store<WasmState>,
+ ) -> Result<Vec<latest::llm_provider::ProviderInfo>> {
+ match self {
+ Extension::V0_7_0(ext) => ext.call_llm_providers(store).await,
+ _ => Ok(Vec::new()),
+ }
+ }
+
+ pub async fn call_llm_provider_models(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ ) -> Result<Result<Vec<latest::llm_provider::ModelInfo>, String>> {
+ match self {
+ Extension::V0_7_0(ext) => ext.call_llm_provider_models(store, provider_id).await,
+ _ => anyhow::bail!("`llm_provider_models` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_provider_is_authenticated(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ ) -> Result<bool> {
+ match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_llm_provider_is_authenticated(store, provider_id)
+ .await
+ }
+ _ => Ok(false),
+ }
+ }
+
+ pub async fn call_llm_provider_authenticate(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ ) -> Result<Result<(), String>> {
+ match self {
+ Extension::V0_7_0(ext) => ext.call_llm_provider_authenticate(store, provider_id).await,
+ _ => anyhow::bail!("`llm_provider_authenticate` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_provider_reset_credentials(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ ) -> Result<Result<(), String>> {
+ match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_llm_provider_reset_credentials(store, provider_id)
+ .await
+ }
+ _ => anyhow::bail!("`llm_provider_reset_credentials` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_count_tokens(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ model_id: &str,
+ request: &latest::llm_provider::CompletionRequest,
+ ) -> Result<Result<u64, String>> {
+ match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_llm_count_tokens(store, provider_id, model_id, request)
+ .await
+ }
+ _ => anyhow::bail!("`llm_count_tokens` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_stream_completion_start(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ model_id: &str,
+ request: &latest::llm_provider::CompletionRequest,
+ ) -> Result<Result<String, String>> {
+ match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_llm_stream_completion_start(store, provider_id, model_id, request)
+ .await
+ }
+ _ => anyhow::bail!("`llm_stream_completion_start` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_stream_completion_next(
+ &self,
+ store: &mut Store<WasmState>,
+ stream_id: &str,
+ ) -> Result<Result<Option<latest::llm_provider::CompletionEvent>, String>> {
+ match self {
+ Extension::V0_7_0(ext) => ext.call_llm_stream_completion_next(store, stream_id).await,
+ _ => anyhow::bail!("`llm_stream_completion_next` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_stream_completion_close(
+ &self,
+ store: &mut Store<WasmState>,
+ stream_id: &str,
+ ) -> Result<()> {
+ match self {
+ Extension::V0_7_0(ext) => ext.call_llm_stream_completion_close(store, stream_id).await,
+ _ => anyhow::bail!("`llm_stream_completion_close` not available prior to v0.7.0"),
+ }
+ }
+
+ pub async fn call_llm_cache_configuration(
+ &self,
+ store: &mut Store<WasmState>,
+ provider_id: &str,
+ model_id: &str,
+ ) -> Result<Option<latest::llm_provider::CacheConfiguration>> {
+ match self {
+ Extension::V0_7_0(ext) => {
+ ext.call_llm_cache_configuration(store, provider_id, model_id)
+ .await
+ }
+ _ => Ok(None),
+ }
+ }
}
trait ToWasmtimeResult<T> {
@@ -1,42 +1,15 @@
-use crate::wasm_host::wit::since_v0_6_0::{
- dap::{
- AttachRequest, BuildTaskDefinition, BuildTaskDefinitionTemplatePayload, LaunchRequest,
- StartDebuggingRequestArguments, TcpArguments, TcpArgumentsTemplate,
- },
- slash_command::SlashCommandOutputSection,
-};
-use crate::wasm_host::wit::{CompletionKind, CompletionLabelDetails, InsertTextFormat, SymbolKind};
-use crate::wasm_host::{WasmState, wit::ToWasmtimeResult};
-use ::http_client::{AsyncBody, HttpRequestExt};
-use ::settings::{Settings, WorktreeId};
-use anyhow::{Context as _, Result, bail};
-use async_compression::futures::bufread::GzipDecoder;
-use async_tar::Archive;
-use async_trait::async_trait;
-use extension::{
- ExtensionLanguageServerProxy, KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate,
-};
-use futures::{AsyncReadExt, lock::Mutex};
-use futures::{FutureExt as _, io::BufReader};
-use gpui::{BackgroundExecutor, SharedString};
-use language::{BinaryStatus, LanguageName, language_settings::AllLanguageSettings};
-use project::project_settings::ProjectSettings;
+use crate::wasm_host::WasmState;
+use anyhow::Result;
+use extension::{KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate};
+use gpui::BackgroundExecutor;
use semver::Version;
-use std::{
- env,
- net::Ipv4Addr,
- path::{Path, PathBuf},
- str::FromStr,
- sync::{Arc, OnceLock},
-};
-use task::{SpawnInTerminal, ZedDebugConfig};
-use url::Url;
-use util::{
- archive::extract_zip, fs::make_file_executable, maybe, paths::PathStyle, rel_path::RelPath,
-};
+use std::sync::{Arc, OnceLock};
use wasmtime::component::{Linker, Resource};
+use super::latest;
+
pub const MIN_VERSION: Version = Version::new(0, 6, 0);
+#[allow(dead_code)]
pub const MAX_VERSION: Version = Version::new(0, 7, 0);
wasmtime::component::bindgen!({
@@ -44,15 +17,22 @@ wasmtime::component::bindgen!({
trappable_imports: true,
path: "../extension_api/wit/since_v0.6.0",
with: {
- "worktree": ExtensionWorktree,
- "project": ExtensionProject,
- "key-value-store": ExtensionKeyValueStore,
- "zed:extension/http-client/http-response-stream": ExtensionHttpResponseStream
+ "worktree": ExtensionWorktree,
+ "project": ExtensionProject,
+ "key-value-store": ExtensionKeyValueStore,
+ "zed:extension/common": latest::zed::extension::common,
+ "zed:extension/github": latest::zed::extension::github,
+ "zed:extension/http-client": latest::zed::extension::http_client,
+ "zed:extension/lsp": latest::zed::extension::lsp,
+ "zed:extension/nodejs": latest::zed::extension::nodejs,
+ "zed:extension/platform": latest::zed::extension::platform,
+ "zed:extension/process": latest::zed::extension::process,
+ "zed:extension/slash-command": latest::zed::extension::slash_command,
+ "zed:extension/context-server": latest::zed::extension::context_server,
+ "zed:extension/dap": latest::zed::extension::dap,
},
});
-pub use self::zed::extension::*;
-
mod settings {
#![allow(dead_code)]
include!(concat!(env!("OUT_DIR"), "/since_v0.6.0/settings.rs"));
@@ -61,289 +41,32 @@ mod settings {
pub type ExtensionWorktree = Arc<dyn WorktreeDelegate>;
pub type ExtensionProject = Arc<dyn ProjectDelegate>;
pub type ExtensionKeyValueStore = Arc<dyn KeyValueStoreDelegate>;
-pub type ExtensionHttpResponseStream = Arc<Mutex<::http_client::Response<AsyncBody>>>;
pub fn linker(executor: &BackgroundExecutor) -> &'static Linker<WasmState> {
static LINKER: OnceLock<Linker<WasmState>> = OnceLock::new();
LINKER.get_or_init(|| super::new_linker(executor, Extension::add_to_linker))
}
-impl From<Range> for std::ops::Range<usize> {
- fn from(range: Range) -> Self {
- let start = range.start as usize;
- let end = range.end as usize;
- start..end
- }
-}
-
-impl From<Command> for extension::Command {
- fn from(value: Command) -> Self {
- Self {
- command: value.command.into(),
- args: value.args,
- env: value.env,
- }
- }
-}
-
-impl From<StartDebuggingRequestArgumentsRequest>
- for extension::StartDebuggingRequestArgumentsRequest
-{
- fn from(value: StartDebuggingRequestArgumentsRequest) -> Self {
- match value {
- StartDebuggingRequestArgumentsRequest::Launch => Self::Launch,
- StartDebuggingRequestArgumentsRequest::Attach => Self::Attach,
- }
- }
-}
-impl TryFrom<StartDebuggingRequestArguments> for extension::StartDebuggingRequestArguments {
- type Error = anyhow::Error;
-
- fn try_from(value: StartDebuggingRequestArguments) -> Result<Self, Self::Error> {
- Ok(Self {
- configuration: serde_json::from_str(&value.configuration)?,
- request: value.request.into(),
- })
- }
-}
-impl From<TcpArguments> for extension::TcpArguments {
- fn from(value: TcpArguments) -> Self {
- Self {
- host: value.host.into(),
- port: value.port,
- timeout: value.timeout,
- }
- }
-}
-
-impl From<extension::TcpArgumentsTemplate> for TcpArgumentsTemplate {
- fn from(value: extension::TcpArgumentsTemplate) -> Self {
- Self {
- host: value.host.map(Ipv4Addr::to_bits),
- port: value.port,
- timeout: value.timeout,
- }
- }
-}
-
-impl From<TcpArgumentsTemplate> for extension::TcpArgumentsTemplate {
- fn from(value: TcpArgumentsTemplate) -> Self {
- Self {
- host: value.host.map(Ipv4Addr::from_bits),
- port: value.port,
- timeout: value.timeout,
- }
- }
-}
-
-impl TryFrom<extension::DebugTaskDefinition> for DebugTaskDefinition {
- type Error = anyhow::Error;
- fn try_from(value: extension::DebugTaskDefinition) -> Result<Self, Self::Error> {
- Ok(Self {
- label: value.label.to_string(),
- adapter: value.adapter.to_string(),
- config: value.config.to_string(),
- tcp_connection: value.tcp_connection.map(Into::into),
- })
- }
-}
-
-impl From<task::DebugRequest> for DebugRequest {
- fn from(value: task::DebugRequest) -> Self {
- match value {
- task::DebugRequest::Launch(launch_request) => Self::Launch(launch_request.into()),
- task::DebugRequest::Attach(attach_request) => Self::Attach(attach_request.into()),
- }
- }
-}
-
-impl From<DebugRequest> for task::DebugRequest {
- fn from(value: DebugRequest) -> Self {
- match value {
- DebugRequest::Launch(launch_request) => Self::Launch(launch_request.into()),
- DebugRequest::Attach(attach_request) => Self::Attach(attach_request.into()),
- }
- }
-}
-
-impl From<task::LaunchRequest> for LaunchRequest {
- fn from(value: task::LaunchRequest) -> Self {
- Self {
- program: value.program,
- cwd: value.cwd.map(|p| p.to_string_lossy().into_owned()),
- args: value.args,
- envs: value.env.into_iter().collect(),
- }
- }
-}
-
-impl From<task::AttachRequest> for AttachRequest {
- fn from(value: task::AttachRequest) -> Self {
- Self {
- process_id: value.process_id,
- }
- }
-}
-
-impl From<LaunchRequest> for task::LaunchRequest {
- fn from(value: LaunchRequest) -> Self {
- Self {
- program: value.program,
- cwd: value.cwd.map(|p| p.into()),
- args: value.args,
- env: value.envs.into_iter().collect(),
- }
- }
-}
-impl From<AttachRequest> for task::AttachRequest {
- fn from(value: AttachRequest) -> Self {
- Self {
- process_id: value.process_id,
- }
- }
-}
-
-impl From<ZedDebugConfig> for DebugConfig {
- fn from(value: ZedDebugConfig) -> Self {
- Self {
- label: value.label.into(),
- adapter: value.adapter.into(),
- request: value.request.into(),
- stop_on_entry: value.stop_on_entry,
- }
- }
-}
-impl TryFrom<DebugAdapterBinary> for extension::DebugAdapterBinary {
- type Error = anyhow::Error;
- fn try_from(value: DebugAdapterBinary) -> Result<Self, Self::Error> {
- Ok(Self {
- command: value.command,
- arguments: value.arguments,
- envs: value.envs.into_iter().collect(),
- cwd: value.cwd.map(|s| s.into()),
- connection: value.connection.map(Into::into),
- request_args: value.request_args.try_into()?,
- })
- }
-}
-
-impl From<BuildTaskDefinition> for extension::BuildTaskDefinition {
- fn from(value: BuildTaskDefinition) -> Self {
- match value {
- BuildTaskDefinition::ByName(name) => Self::ByName(name.into()),
- BuildTaskDefinition::Template(build_task_template) => Self::Template {
- task_template: build_task_template.template.into(),
- locator_name: build_task_template.locator_name.map(SharedString::from),
- },
- }
- }
-}
-
-impl From<extension::BuildTaskDefinition> for BuildTaskDefinition {
- fn from(value: extension::BuildTaskDefinition) -> Self {
- match value {
- extension::BuildTaskDefinition::ByName(name) => Self::ByName(name.into()),
- extension::BuildTaskDefinition::Template {
- task_template,
- locator_name,
- } => Self::Template(BuildTaskDefinitionTemplatePayload {
- template: task_template.into(),
- locator_name: locator_name.map(String::from),
- }),
- }
- }
-}
-impl From<BuildTaskTemplate> for extension::BuildTaskTemplate {
- fn from(value: BuildTaskTemplate) -> Self {
- Self {
- label: value.label,
- command: value.command,
- args: value.args,
- env: value.env.into_iter().collect(),
- cwd: value.cwd,
- ..Default::default()
- }
- }
-}
-impl From<extension::BuildTaskTemplate> for BuildTaskTemplate {
- fn from(value: extension::BuildTaskTemplate) -> Self {
- Self {
- label: value.label,
- command: value.command,
- args: value.args,
- env: value.env.into_iter().collect(),
- cwd: value.cwd,
- }
- }
-}
-
-impl TryFrom<DebugScenario> for extension::DebugScenario {
- type Error = anyhow::Error;
-
- fn try_from(value: DebugScenario) -> std::result::Result<Self, Self::Error> {
- Ok(Self {
- adapter: value.adapter.into(),
- label: value.label.into(),
- build: value.build.map(Into::into),
- config: serde_json::Value::from_str(&value.config)?,
- tcp_connection: value.tcp_connection.map(Into::into),
- })
- }
-}
-
-impl From<extension::DebugScenario> for DebugScenario {
- fn from(value: extension::DebugScenario) -> Self {
- Self {
- adapter: value.adapter.into(),
- label: value.label.into(),
- build: value.build.map(Into::into),
- config: value.config.to_string(),
- tcp_connection: value.tcp_connection.map(Into::into),
- }
- }
-}
-
-impl TryFrom<SpawnInTerminal> for ResolvedTask {
- type Error = anyhow::Error;
-
- fn try_from(value: SpawnInTerminal) -> Result<Self, Self::Error> {
- Ok(Self {
- label: value.label,
- command: value.command.context("missing command")?,
- args: value.args,
- env: value.env.into_iter().collect(),
- cwd: value.cwd.map(|s| {
- let s = s.to_string_lossy();
- if cfg!(target_os = "windows") {
- s.replace('\\', "/")
- } else {
- s.into_owned()
- }
- }),
- })
- }
-}
-
-impl From<CodeLabel> for extension::CodeLabel {
+impl From<CodeLabel> for latest::CodeLabel {
fn from(value: CodeLabel) -> Self {
Self {
code: value.code,
spans: value.spans.into_iter().map(Into::into).collect(),
- filter_range: value.filter_range.into(),
+ filter_range: value.filter_range,
}
}
}
-impl From<CodeLabelSpan> for extension::CodeLabelSpan {
+impl From<CodeLabelSpan> for latest::CodeLabelSpan {
fn from(value: CodeLabelSpan) -> Self {
match value {
- CodeLabelSpan::CodeRange(range) => Self::CodeRange(range.into()),
+ CodeLabelSpan::CodeRange(range) => Self::CodeRange(range),
CodeLabelSpan::Literal(literal) => Self::Literal(literal.into()),
}
}
}
-impl From<CodeLabelSpanLiteral> for extension::CodeLabelSpanLiteral {
+impl From<CodeLabelSpanLiteral> for latest::CodeLabelSpanLiteral {
fn from(value: CodeLabelSpanLiteral) -> Self {
Self {
text: value.text,
@@ -352,167 +75,37 @@ impl From<CodeLabelSpanLiteral> for extension::CodeLabelSpanLiteral {
}
}
-impl From<extension::Completion> for Completion {
- fn from(value: extension::Completion) -> Self {
- Self {
- label: value.label,
- label_details: value.label_details.map(Into::into),
- detail: value.detail,
- kind: value.kind.map(Into::into),
- insert_text_format: value.insert_text_format.map(Into::into),
- }
- }
-}
-
-impl From<extension::CompletionLabelDetails> for CompletionLabelDetails {
- fn from(value: extension::CompletionLabelDetails) -> Self {
+impl From<SettingsLocation> for latest::SettingsLocation {
+ fn from(value: SettingsLocation) -> Self {
Self {
- detail: value.detail,
- description: value.description,
+ worktree_id: value.worktree_id,
+ path: value.path,
}
}
}
-impl From<extension::CompletionKind> for CompletionKind {
- fn from(value: extension::CompletionKind) -> Self {
+impl From<LanguageServerInstallationStatus> for latest::LanguageServerInstallationStatus {
+ fn from(value: LanguageServerInstallationStatus) -> Self {
match value {
- extension::CompletionKind::Text => Self::Text,
- extension::CompletionKind::Method => Self::Method,
- extension::CompletionKind::Function => Self::Function,
- extension::CompletionKind::Constructor => Self::Constructor,
- extension::CompletionKind::Field => Self::Field,
- extension::CompletionKind::Variable => Self::Variable,
- extension::CompletionKind::Class => Self::Class,
- extension::CompletionKind::Interface => Self::Interface,
- extension::CompletionKind::Module => Self::Module,
- extension::CompletionKind::Property => Self::Property,
- extension::CompletionKind::Unit => Self::Unit,
- extension::CompletionKind::Value => Self::Value,
- extension::CompletionKind::Enum => Self::Enum,
- extension::CompletionKind::Keyword => Self::Keyword,
- extension::CompletionKind::Snippet => Self::Snippet,
- extension::CompletionKind::Color => Self::Color,
- extension::CompletionKind::File => Self::File,
- extension::CompletionKind::Reference => Self::Reference,
- extension::CompletionKind::Folder => Self::Folder,
- extension::CompletionKind::EnumMember => Self::EnumMember,
- extension::CompletionKind::Constant => Self::Constant,
- extension::CompletionKind::Struct => Self::Struct,
- extension::CompletionKind::Event => Self::Event,
- extension::CompletionKind::Operator => Self::Operator,
- extension::CompletionKind::TypeParameter => Self::TypeParameter,
- extension::CompletionKind::Other(value) => Self::Other(value),
+ LanguageServerInstallationStatus::None => Self::None,
+ LanguageServerInstallationStatus::Downloading => Self::Downloading,
+ LanguageServerInstallationStatus::CheckingForUpdate => Self::CheckingForUpdate,
+ LanguageServerInstallationStatus::Failed(message) => Self::Failed(message),
}
}
}
-impl From<extension::InsertTextFormat> for InsertTextFormat {
- fn from(value: extension::InsertTextFormat) -> Self {
+impl From<DownloadedFileType> for latest::DownloadedFileType {
+ fn from(value: DownloadedFileType) -> Self {
match value {
- extension::InsertTextFormat::PlainText => Self::PlainText,
- extension::InsertTextFormat::Snippet => Self::Snippet,
- extension::InsertTextFormat::Other(value) => Self::Other(value),
- }
- }
-}
-
-impl From<extension::Symbol> for Symbol {
- fn from(value: extension::Symbol) -> Self {
- Self {
- kind: value.kind.into(),
- name: value.name,
- }
- }
-}
-
-impl From<extension::SymbolKind> for SymbolKind {
- fn from(value: extension::SymbolKind) -> Self {
- match value {
- extension::SymbolKind::File => Self::File,
- extension::SymbolKind::Module => Self::Module,
- extension::SymbolKind::Namespace => Self::Namespace,
- extension::SymbolKind::Package => Self::Package,
- extension::SymbolKind::Class => Self::Class,
- extension::SymbolKind::Method => Self::Method,
- extension::SymbolKind::Property => Self::Property,
- extension::SymbolKind::Field => Self::Field,
- extension::SymbolKind::Constructor => Self::Constructor,
- extension::SymbolKind::Enum => Self::Enum,
- extension::SymbolKind::Interface => Self::Interface,
- extension::SymbolKind::Function => Self::Function,
- extension::SymbolKind::Variable => Self::Variable,
- extension::SymbolKind::Constant => Self::Constant,
- extension::SymbolKind::String => Self::String,
- extension::SymbolKind::Number => Self::Number,
- extension::SymbolKind::Boolean => Self::Boolean,
- extension::SymbolKind::Array => Self::Array,
- extension::SymbolKind::Object => Self::Object,
- extension::SymbolKind::Key => Self::Key,
- extension::SymbolKind::Null => Self::Null,
- extension::SymbolKind::EnumMember => Self::EnumMember,
- extension::SymbolKind::Struct => Self::Struct,
- extension::SymbolKind::Event => Self::Event,
- extension::SymbolKind::Operator => Self::Operator,
- extension::SymbolKind::TypeParameter => Self::TypeParameter,
- extension::SymbolKind::Other(value) => Self::Other(value),
- }
- }
-}
-
-impl From<extension::SlashCommand> for SlashCommand {
- fn from(value: extension::SlashCommand) -> Self {
- Self {
- name: value.name,
- description: value.description,
- tooltip_text: value.tooltip_text,
- requires_argument: value.requires_argument,
- }
- }
-}
-
-impl From<SlashCommandOutput> for extension::SlashCommandOutput {
- fn from(value: SlashCommandOutput) -> Self {
- Self {
- text: value.text,
- sections: value.sections.into_iter().map(Into::into).collect(),
- }
- }
-}
-
-impl From<SlashCommandOutputSection> for extension::SlashCommandOutputSection {
- fn from(value: SlashCommandOutputSection) -> Self {
- Self {
- range: value.range.start as usize..value.range.end as usize,
- label: value.label,
- }
- }
-}
-
-impl From<SlashCommandArgumentCompletion> for extension::SlashCommandArgumentCompletion {
- fn from(value: SlashCommandArgumentCompletion) -> Self {
- Self {
- label: value.label,
- new_text: value.new_text,
- run_command: value.run_command,
+ DownloadedFileType::Gzip => Self::Gzip,
+ DownloadedFileType::GzipTar => Self::GzipTar,
+ DownloadedFileType::Zip => Self::Zip,
+ DownloadedFileType::Uncompressed => Self::Uncompressed,
}
}
}
-impl TryFrom<ContextServerConfiguration> for extension::ContextServerConfiguration {
- type Error = anyhow::Error;
-
- fn try_from(value: ContextServerConfiguration) -> Result<Self, Self::Error> {
- let settings_schema: serde_json::Value = serde_json::from_str(&value.settings_schema)
- .context("Failed to parse settings_schema")?;
-
- Ok(Self {
- installation_instructions: value.installation_instructions,
- default_settings: value.default_settings,
- settings_schema,
- })
- }
-}
-
impl HostKeyValueStore for WasmState {
async fn insert(
&mut self,
@@ -520,8 +113,7 @@ impl HostKeyValueStore for WasmState {
key: String,
value: String,
) -> wasmtime::Result<Result<(), String>> {
- let kv_store = self.table.get(&kv_store)?;
- kv_store.insert(key, value).await.to_wasmtime_result()
+ latest::HostKeyValueStore::insert(self, kv_store, key, value).await
}
async fn drop(&mut self, _worktree: Resource<ExtensionKeyValueStore>) -> Result<()> {
@@ -535,8 +127,7 @@ impl HostProject for WasmState {
&mut self,
project: Resource<ExtensionProject>,
) -> wasmtime::Result<Vec<u64>> {
- let project = self.table.get(&project)?;
- Ok(project.worktree_ids())
+ latest::HostProject::worktree_ids(self, project).await
}
async fn drop(&mut self, _project: Resource<Project>) -> Result<()> {
@@ -547,16 +138,14 @@ impl HostProject for WasmState {
impl HostWorktree for WasmState {
async fn id(&mut self, delegate: Resource<Arc<dyn WorktreeDelegate>>) -> wasmtime::Result<u64> {
- let delegate = self.table.get(&delegate)?;
- Ok(delegate.id())
+ latest::HostWorktree::id(self, delegate).await
}
async fn root_path(
&mut self,
delegate: Resource<Arc<dyn WorktreeDelegate>>,
) -> wasmtime::Result<String> {
- let delegate = self.table.get(&delegate)?;
- Ok(delegate.root_path())
+ latest::HostWorktree::root_path(self, delegate).await
}
async fn read_text_file(
@@ -564,19 +153,14 @@ impl HostWorktree for WasmState {
delegate: Resource<Arc<dyn WorktreeDelegate>>,
path: String,
) -> wasmtime::Result<Result<String, String>> {
- let delegate = self.table.get(&delegate)?;
- Ok(delegate
- .read_text_file(&RelPath::new(Path::new(&path), PathStyle::Posix)?)
- .await
- .map_err(|error| error.to_string()))
+ latest::HostWorktree::read_text_file(self, delegate, path).await
}
async fn shell_env(
&mut self,
delegate: Resource<Arc<dyn WorktreeDelegate>>,
) -> wasmtime::Result<EnvVars> {
- let delegate = self.table.get(&delegate)?;
- Ok(delegate.shell_env().await.into_iter().collect())
+ latest::HostWorktree::shell_env(self, delegate).await
}
async fn which(
@@ -584,8 +168,7 @@ impl HostWorktree for WasmState {
delegate: Resource<Arc<dyn WorktreeDelegate>>,
binary_name: String,
) -> wasmtime::Result<Option<String>> {
- let delegate = self.table.get(&delegate)?;
- Ok(delegate.which(binary_name).await)
+ latest::HostWorktree::which(self, delegate, binary_name).await
}
async fn drop(&mut self, _worktree: Resource<Worktree>) -> Result<()> {
@@ -594,319 +177,6 @@ impl HostWorktree for WasmState {
}
}
-impl common::Host for WasmState {}
-
-impl http_client::Host for WasmState {
- async fn fetch(
- &mut self,
- request: http_client::HttpRequest,
- ) -> wasmtime::Result<Result<http_client::HttpResponse, String>> {
- maybe!(async {
- let url = &request.url;
- let request = convert_request(&request)?;
- let mut response = self.host.http_client.send(request).await?;
-
- if response.status().is_client_error() || response.status().is_server_error() {
- bail!("failed to fetch '{url}': status code {}", response.status())
- }
- convert_response(&mut response).await
- })
- .await
- .to_wasmtime_result()
- }
-
- async fn fetch_stream(
- &mut self,
- request: http_client::HttpRequest,
- ) -> wasmtime::Result<Result<Resource<ExtensionHttpResponseStream>, String>> {
- let request = convert_request(&request)?;
- let response = self.host.http_client.send(request);
- maybe!(async {
- let response = response.await?;
- let stream = Arc::new(Mutex::new(response));
- let resource = self.table.push(stream)?;
- Ok(resource)
- })
- .await
- .to_wasmtime_result()
- }
-}
-
-impl http_client::HostHttpResponseStream for WasmState {
- async fn next_chunk(
- &mut self,
- resource: Resource<ExtensionHttpResponseStream>,
- ) -> wasmtime::Result<Result<Option<Vec<u8>>, String>> {
- let stream = self.table.get(&resource)?.clone();
- maybe!(async move {
- let mut response = stream.lock().await;
- let mut buffer = vec![0; 8192]; // 8KB buffer
- let bytes_read = response.body_mut().read(&mut buffer).await?;
- if bytes_read == 0 {
- Ok(None)
- } else {
- buffer.truncate(bytes_read);
- Ok(Some(buffer))
- }
- })
- .await
- .to_wasmtime_result()
- }
-
- async fn drop(&mut self, _resource: Resource<ExtensionHttpResponseStream>) -> Result<()> {
- Ok(())
- }
-}
-
-impl From<http_client::HttpMethod> for ::http_client::Method {
- fn from(value: http_client::HttpMethod) -> Self {
- match value {
- http_client::HttpMethod::Get => Self::GET,
- http_client::HttpMethod::Post => Self::POST,
- http_client::HttpMethod::Put => Self::PUT,
- http_client::HttpMethod::Delete => Self::DELETE,
- http_client::HttpMethod::Head => Self::HEAD,
- http_client::HttpMethod::Options => Self::OPTIONS,
- http_client::HttpMethod::Patch => Self::PATCH,
- }
- }
-}
-
-fn convert_request(
- extension_request: &http_client::HttpRequest,
-) -> anyhow::Result<::http_client::Request<AsyncBody>> {
- let mut request = ::http_client::Request::builder()
- .method(::http_client::Method::from(extension_request.method))
- .uri(&extension_request.url)
- .follow_redirects(match extension_request.redirect_policy {
- http_client::RedirectPolicy::NoFollow => ::http_client::RedirectPolicy::NoFollow,
- http_client::RedirectPolicy::FollowLimit(limit) => {
- ::http_client::RedirectPolicy::FollowLimit(limit)
- }
- http_client::RedirectPolicy::FollowAll => ::http_client::RedirectPolicy::FollowAll,
- });
- for (key, value) in &extension_request.headers {
- request = request.header(key, value);
- }
- let body = extension_request
- .body
- .clone()
- .map(AsyncBody::from)
- .unwrap_or_default();
- request.body(body).map_err(anyhow::Error::from)
-}
-
-async fn convert_response(
- response: &mut ::http_client::Response<AsyncBody>,
-) -> anyhow::Result<http_client::HttpResponse> {
- let mut extension_response = http_client::HttpResponse {
- body: Vec::new(),
- headers: Vec::new(),
- };
-
- for (key, value) in response.headers() {
- extension_response
- .headers
- .push((key.to_string(), value.to_str().unwrap_or("").to_string()));
- }
-
- response
- .body_mut()
- .read_to_end(&mut extension_response.body)
- .await?;
-
- Ok(extension_response)
-}
-
-impl nodejs::Host for WasmState {
- async fn node_binary_path(&mut self) -> wasmtime::Result<Result<String, String>> {
- self.host
- .node_runtime
- .binary_path()
- .await
- .map(|path| path.to_string_lossy().into_owned())
- .to_wasmtime_result()
- }
-
- async fn npm_package_latest_version(
- &mut self,
- package_name: String,
- ) -> wasmtime::Result<Result<String, String>> {
- self.host
- .node_runtime
- .npm_package_latest_version(&package_name)
- .await
- .to_wasmtime_result()
- }
-
- async fn npm_package_installed_version(
- &mut self,
- package_name: String,
- ) -> wasmtime::Result<Result<Option<String>, String>> {
- self.host
- .node_runtime
- .npm_package_installed_version(&self.work_dir(), &package_name)
- .await
- .to_wasmtime_result()
- }
-
- async fn npm_install_package(
- &mut self,
- package_name: String,
- version: String,
- ) -> wasmtime::Result<Result<(), String>> {
- self.capability_granter
- .grant_npm_install_package(&package_name)?;
-
- self.host
- .node_runtime
- .npm_install_packages(&self.work_dir(), &[(&package_name, &version)])
- .await
- .to_wasmtime_result()
- }
-}
-
-#[async_trait]
-impl lsp::Host for WasmState {}
-
-impl From<::http_client::github::GithubRelease> for github::GithubRelease {
- fn from(value: ::http_client::github::GithubRelease) -> Self {
- Self {
- version: value.tag_name,
- assets: value.assets.into_iter().map(Into::into).collect(),
- }
- }
-}
-
-impl From<::http_client::github::GithubReleaseAsset> for github::GithubReleaseAsset {
- fn from(value: ::http_client::github::GithubReleaseAsset) -> Self {
- Self {
- name: value.name,
- download_url: value.browser_download_url,
- }
- }
-}
-
-impl github::Host for WasmState {
- async fn latest_github_release(
- &mut self,
- repo: String,
- options: github::GithubReleaseOptions,
- ) -> wasmtime::Result<Result<github::GithubRelease, String>> {
- maybe!(async {
- let release = ::http_client::github::latest_github_release(
- &repo,
- options.require_assets,
- options.pre_release,
- self.host.http_client.clone(),
- )
- .await?;
- Ok(release.into())
- })
- .await
- .to_wasmtime_result()
- }
-
- async fn github_release_by_tag_name(
- &mut self,
- repo: String,
- tag: String,
- ) -> wasmtime::Result<Result<github::GithubRelease, String>> {
- maybe!(async {
- let release = ::http_client::github::get_release_by_tag_name(
- &repo,
- &tag,
- self.host.http_client.clone(),
- )
- .await?;
- Ok(release.into())
- })
- .await
- .to_wasmtime_result()
- }
-}
-
-impl platform::Host for WasmState {
- async fn current_platform(&mut self) -> Result<(platform::Os, platform::Architecture)> {
- Ok((
- match env::consts::OS {
- "macos" => platform::Os::Mac,
- "linux" => platform::Os::Linux,
- "windows" => platform::Os::Windows,
- _ => panic!("unsupported os"),
- },
- match env::consts::ARCH {
- "aarch64" => platform::Architecture::Aarch64,
- "x86" => platform::Architecture::X86,
- "x86_64" => platform::Architecture::X8664,
- _ => panic!("unsupported architecture"),
- },
- ))
- }
-}
-
-impl From<std::process::Output> for process::Output {
- fn from(output: std::process::Output) -> Self {
- Self {
- status: output.status.code(),
- stdout: output.stdout,
- stderr: output.stderr,
- }
- }
-}
-
-impl process::Host for WasmState {
- async fn run_command(
- &mut self,
- command: process::Command,
- ) -> wasmtime::Result<Result<process::Output, String>> {
- maybe!(async {
- self.capability_granter
- .grant_exec(&command.command, &command.args)?;
-
- let output = util::command::new_smol_command(command.command.as_str())
- .args(&command.args)
- .envs(command.env)
- .output()
- .await?;
-
- Ok(output.into())
- })
- .await
- .to_wasmtime_result()
- }
-}
-
-#[async_trait]
-impl slash_command::Host for WasmState {}
-
-#[async_trait]
-impl context_server::Host for WasmState {}
-
-impl dap::Host for WasmState {
- async fn resolve_tcp_template(
- &mut self,
- template: TcpArgumentsTemplate,
- ) -> wasmtime::Result<Result<TcpArguments, String>> {
- maybe!(async {
- let (host, port, timeout) =
- ::dap::configure_tcp_connection(task::TcpArgumentsTemplate {
- port: template.port,
- host: template.host.map(Ipv4Addr::from_bits),
- timeout: template.timeout,
- })
- .await?;
- Ok(TcpArguments {
- port,
- host: host.to_bits(),
- timeout,
- })
- })
- .await
- .to_wasmtime_result()
- }
-}
-
impl ExtensionImports for WasmState {
async fn get_settings(
&mut self,
@@ -914,96 +184,13 @@ impl ExtensionImports for WasmState {
category: String,
key: Option<String>,
) -> wasmtime::Result<Result<String, String>> {
- self.on_main_thread(|cx| {
- async move {
- let path = location.as_ref().and_then(|location| {
- RelPath::new(Path::new(&location.path), PathStyle::Posix).ok()
- });
- let location = path
- .as_ref()
- .zip(location.as_ref())
- .map(|(path, location)| ::settings::SettingsLocation {
- worktree_id: WorktreeId::from_proto(location.worktree_id),
- path,
- });
-
- cx.update(|cx| match category.as_str() {
- "language" => {
- let key = key.map(|k| LanguageName::new(&k));
- let settings = AllLanguageSettings::get(location, cx).language(
- location,
- key.as_ref(),
- cx,
- );
- Ok(serde_json::to_string(&settings::LanguageSettings {
- tab_size: settings.tab_size,
- })?)
- }
- "lsp" => {
- let settings = key
- .and_then(|key| {
- ProjectSettings::get(location, cx)
- .lsp
- .get(&::lsp::LanguageServerName::from_proto(key))
- })
- .cloned()
- .unwrap_or_default();
- Ok(serde_json::to_string(&settings::LspSettings {
- binary: settings.binary.map(|binary| settings::CommandSettings {
- path: binary.path,
- arguments: binary.arguments,
- env: binary.env.map(|env| env.into_iter().collect()),
- }),
- settings: settings.settings,
- initialization_options: settings.initialization_options,
- })?)
- }
- "context_servers" => {
- let settings = key
- .and_then(|key| {
- ProjectSettings::get(location, cx)
- .context_servers
- .get(key.as_str())
- })
- .cloned()
- .unwrap_or_else(|| {
- project::project_settings::ContextServerSettings::default_extension(
- )
- });
-
- match settings {
- project::project_settings::ContextServerSettings::Stdio {
- enabled: _,
- command,
- } => Ok(serde_json::to_string(&settings::ContextServerSettings {
- command: Some(settings::CommandSettings {
- path: command.path.to_str().map(|path| path.to_string()),
- arguments: Some(command.args),
- env: command.env.map(|env| env.into_iter().collect()),
- }),
- settings: None,
- })?),
- project::project_settings::ContextServerSettings::Extension {
- enabled: _,
- settings,
- } => Ok(serde_json::to_string(&settings::ContextServerSettings {
- command: None,
- settings: Some(settings),
- })?),
- project::project_settings::ContextServerSettings::Http { .. } => {
- bail!("remote context server settings not supported in 0.6.0")
- }
- }
- }
- _ => {
- bail!("Unknown settings category: {}", category);
- }
- })
- }
- .boxed_local()
- })
- .await?
- .to_wasmtime_result()
+ latest::ExtensionImports::get_settings(
+ self,
+ location.map(|location| location.into()),
+ category,
+ key,
+ )
+ .await
}
async fn set_language_server_installation_status(
@@ -0,0 +1,1196 @@
+use crate::wasm_host::wit::since_v0_7_0::{
+ dap::{
+ AttachRequest, BuildTaskDefinition, BuildTaskDefinitionTemplatePayload, LaunchRequest,
+ StartDebuggingRequestArguments, TcpArguments, TcpArgumentsTemplate,
+ },
+ lsp::{CompletionKind, CompletionLabelDetails, InsertTextFormat, SymbolKind},
+ slash_command::SlashCommandOutputSection,
+};
+use crate::wasm_host::{WasmState, wit::ToWasmtimeResult};
+use ::http_client::{AsyncBody, HttpRequestExt};
+use ::settings::{Settings, WorktreeId};
+use anyhow::{Context as _, Result, bail};
+use async_compression::futures::bufread::GzipDecoder;
+use async_tar::Archive;
+use async_trait::async_trait;
+use extension::{
+ ExtensionLanguageServerProxy, KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate,
+};
+use futures::{AsyncReadExt, lock::Mutex};
+use futures::{FutureExt as _, io::BufReader};
+use gpui::{BackgroundExecutor, SharedString};
+use language::{BinaryStatus, LanguageName, language_settings::AllLanguageSettings};
+use project::project_settings::ProjectSettings;
+use semver::Version;
+use std::{
+ env,
+ net::Ipv4Addr,
+ path::{Path, PathBuf},
+ str::FromStr,
+ sync::{Arc, OnceLock},
+};
+use task::{SpawnInTerminal, ZedDebugConfig};
+use url::Url;
+use util::{
+ archive::extract_zip, fs::make_file_executable, maybe, paths::PathStyle, rel_path::RelPath,
+};
+use wasmtime::component::{Linker, Resource};
+
+pub const MIN_VERSION: Version = Version::new(0, 7, 0);
+pub const MAX_VERSION: Version = Version::new(0, 8, 0);
+
+wasmtime::component::bindgen!({
+ async: true,
+ trappable_imports: true,
+ path: "../extension_api/wit/since_v0.7.0",
+ with: {
+ "worktree": ExtensionWorktree,
+ "project": ExtensionProject,
+ "key-value-store": ExtensionKeyValueStore,
+ "zed:extension/http-client/http-response-stream": ExtensionHttpResponseStream,
+ },
+});
+
+// This is the latest version, so we pub use to make types available to parent module.
+// Note: The parent wit.rs module re-exports specific types from here as the "latest" types.
+pub use self::zed::extension::*;
+
+mod settings {
+ #![allow(dead_code)]
+ include!(concat!(env!("OUT_DIR"), "/since_v0.7.0/settings.rs"));
+}
+
+pub type ExtensionWorktree = Arc<dyn WorktreeDelegate>;
+pub type ExtensionProject = Arc<dyn ProjectDelegate>;
+pub type ExtensionKeyValueStore = Arc<dyn KeyValueStoreDelegate>;
+pub type ExtensionHttpResponseStream = Arc<Mutex<::http_client::Response<AsyncBody>>>;
+
+pub fn linker(executor: &BackgroundExecutor) -> &'static Linker<WasmState> {
+ static LINKER: OnceLock<Linker<WasmState>> = OnceLock::new();
+ LINKER.get_or_init(|| super::new_linker(executor, Extension::add_to_linker))
+}
+
+impl From<Range> for std::ops::Range<usize> {
+ fn from(range: Range) -> Self {
+ let start = range.start as usize;
+ let end = range.end as usize;
+ start..end
+ }
+}
+
+impl From<Command> for extension::Command {
+ fn from(value: Command) -> Self {
+ Self {
+ command: value.command.into(),
+ args: value.args,
+ env: value.env,
+ }
+ }
+}
+
+impl From<StartDebuggingRequestArgumentsRequest>
+ for extension::StartDebuggingRequestArgumentsRequest
+{
+ fn from(value: StartDebuggingRequestArgumentsRequest) -> Self {
+ match value {
+ StartDebuggingRequestArgumentsRequest::Launch => Self::Launch,
+ StartDebuggingRequestArgumentsRequest::Attach => Self::Attach,
+ }
+ }
+}
+impl TryFrom<StartDebuggingRequestArguments> for extension::StartDebuggingRequestArguments {
+ type Error = anyhow::Error;
+
+ fn try_from(value: StartDebuggingRequestArguments) -> Result<Self, Self::Error> {
+ Ok(Self {
+ configuration: serde_json::from_str(&value.configuration)?,
+ request: value.request.into(),
+ })
+ }
+}
+impl From<TcpArguments> for extension::TcpArguments {
+ fn from(value: TcpArguments) -> Self {
+ Self {
+ host: value.host.into(),
+ port: value.port,
+ timeout: value.timeout,
+ }
+ }
+}
+
+impl From<extension::TcpArgumentsTemplate> for TcpArgumentsTemplate {
+ fn from(value: extension::TcpArgumentsTemplate) -> Self {
+ Self {
+ host: value.host.map(Ipv4Addr::to_bits),
+ port: value.port,
+ timeout: value.timeout,
+ }
+ }
+}
+
+impl From<TcpArgumentsTemplate> for extension::TcpArgumentsTemplate {
+ fn from(value: TcpArgumentsTemplate) -> Self {
+ Self {
+ host: value.host.map(Ipv4Addr::from_bits),
+ port: value.port,
+ timeout: value.timeout,
+ }
+ }
+}
+
+impl TryFrom<extension::DebugTaskDefinition> for DebugTaskDefinition {
+ type Error = anyhow::Error;
+ fn try_from(value: extension::DebugTaskDefinition) -> Result<Self, Self::Error> {
+ Ok(Self {
+ label: value.label.to_string(),
+ adapter: value.adapter.to_string(),
+ config: value.config.to_string(),
+ tcp_connection: value.tcp_connection.map(Into::into),
+ })
+ }
+}
+
+impl From<task::DebugRequest> for DebugRequest {
+ fn from(value: task::DebugRequest) -> Self {
+ match value {
+ task::DebugRequest::Launch(launch_request) => Self::Launch(launch_request.into()),
+ task::DebugRequest::Attach(attach_request) => Self::Attach(attach_request.into()),
+ }
+ }
+}
+
+impl From<DebugRequest> for task::DebugRequest {
+ fn from(value: DebugRequest) -> Self {
+ match value {
+ DebugRequest::Launch(launch_request) => Self::Launch(launch_request.into()),
+ DebugRequest::Attach(attach_request) => Self::Attach(attach_request.into()),
+ }
+ }
+}
+
+impl From<task::LaunchRequest> for LaunchRequest {
+ fn from(value: task::LaunchRequest) -> Self {
+ Self {
+ program: value.program,
+ cwd: value.cwd.map(|p| p.to_string_lossy().into_owned()),
+ args: value.args,
+ envs: value.env.into_iter().collect(),
+ }
+ }
+}
+
+impl From<task::AttachRequest> for AttachRequest {
+ fn from(value: task::AttachRequest) -> Self {
+ Self {
+ process_id: value.process_id,
+ }
+ }
+}
+
+impl From<LaunchRequest> for task::LaunchRequest {
+ fn from(value: LaunchRequest) -> Self {
+ Self {
+ program: value.program,
+ cwd: value.cwd.map(|p| p.into()),
+ args: value.args,
+ env: value.envs.into_iter().collect(),
+ }
+ }
+}
+impl From<AttachRequest> for task::AttachRequest {
+ fn from(value: AttachRequest) -> Self {
+ Self {
+ process_id: value.process_id,
+ }
+ }
+}
+
+impl From<ZedDebugConfig> for DebugConfig {
+ fn from(value: ZedDebugConfig) -> Self {
+ Self {
+ label: value.label.into(),
+ adapter: value.adapter.into(),
+ request: value.request.into(),
+ stop_on_entry: value.stop_on_entry,
+ }
+ }
+}
+impl TryFrom<DebugAdapterBinary> for extension::DebugAdapterBinary {
+ type Error = anyhow::Error;
+ fn try_from(value: DebugAdapterBinary) -> Result<Self, Self::Error> {
+ Ok(Self {
+ command: value.command,
+ arguments: value.arguments,
+ envs: value.envs.into_iter().collect(),
+ cwd: value.cwd.map(|s| s.into()),
+ connection: value.connection.map(Into::into),
+ request_args: value.request_args.try_into()?,
+ })
+ }
+}
+
+impl From<BuildTaskDefinition> for extension::BuildTaskDefinition {
+ fn from(value: BuildTaskDefinition) -> Self {
+ match value {
+ BuildTaskDefinition::ByName(name) => Self::ByName(name.into()),
+ BuildTaskDefinition::Template(build_task_template) => Self::Template {
+ task_template: build_task_template.template.into(),
+ locator_name: build_task_template.locator_name.map(SharedString::from),
+ },
+ }
+ }
+}
+
+impl From<extension::BuildTaskDefinition> for BuildTaskDefinition {
+ fn from(value: extension::BuildTaskDefinition) -> Self {
+ match value {
+ extension::BuildTaskDefinition::ByName(name) => Self::ByName(name.into()),
+ extension::BuildTaskDefinition::Template {
+ task_template,
+ locator_name,
+ } => Self::Template(BuildTaskDefinitionTemplatePayload {
+ template: task_template.into(),
+ locator_name: locator_name.map(String::from),
+ }),
+ }
+ }
+}
+impl From<BuildTaskTemplate> for extension::BuildTaskTemplate {
+ fn from(value: BuildTaskTemplate) -> Self {
+ Self {
+ label: value.label,
+ command: value.command,
+ args: value.args,
+ env: value.env.into_iter().collect(),
+ cwd: value.cwd,
+ ..Default::default()
+ }
+ }
+}
+impl From<extension::BuildTaskTemplate> for BuildTaskTemplate {
+ fn from(value: extension::BuildTaskTemplate) -> Self {
+ Self {
+ label: value.label,
+ command: value.command,
+ args: value.args,
+ env: value.env.into_iter().collect(),
+ cwd: value.cwd,
+ }
+ }
+}
+
+impl TryFrom<DebugScenario> for extension::DebugScenario {
+ type Error = anyhow::Error;
+
+ fn try_from(value: DebugScenario) -> std::result::Result<Self, Self::Error> {
+ Ok(Self {
+ adapter: value.adapter.into(),
+ label: value.label.into(),
+ build: value.build.map(Into::into),
+ config: serde_json::Value::from_str(&value.config)?,
+ tcp_connection: value.tcp_connection.map(Into::into),
+ })
+ }
+}
+
+impl From<extension::DebugScenario> for DebugScenario {
+ fn from(value: extension::DebugScenario) -> Self {
+ Self {
+ adapter: value.adapter.into(),
+ label: value.label.into(),
+ build: value.build.map(Into::into),
+ config: value.config.to_string(),
+ tcp_connection: value.tcp_connection.map(Into::into),
+ }
+ }
+}
+
+impl TryFrom<SpawnInTerminal> for ResolvedTask {
+ type Error = anyhow::Error;
+
+ fn try_from(value: SpawnInTerminal) -> Result<Self, Self::Error> {
+ Ok(Self {
+ label: value.label,
+ command: value.command.context("missing command")?,
+ args: value.args,
+ env: value.env.into_iter().collect(),
+ cwd: value.cwd.map(|s| {
+ let s = s.to_string_lossy();
+ if cfg!(target_os = "windows") {
+ s.replace('\\', "/")
+ } else {
+ s.into_owned()
+ }
+ }),
+ })
+ }
+}
+
+impl From<CodeLabel> for extension::CodeLabel {
+ fn from(value: CodeLabel) -> Self {
+ Self {
+ code: value.code,
+ spans: value.spans.into_iter().map(Into::into).collect(),
+ filter_range: value.filter_range.into(),
+ }
+ }
+}
+
+impl From<CodeLabelSpan> for extension::CodeLabelSpan {
+ fn from(value: CodeLabelSpan) -> Self {
+ match value {
+ CodeLabelSpan::CodeRange(range) => Self::CodeRange(range.into()),
+ CodeLabelSpan::Literal(literal) => Self::Literal(literal.into()),
+ }
+ }
+}
+
+impl From<CodeLabelSpanLiteral> for extension::CodeLabelSpanLiteral {
+ fn from(value: CodeLabelSpanLiteral) -> Self {
+ Self {
+ text: value.text,
+ highlight_name: value.highlight_name,
+ }
+ }
+}
+
+impl From<extension::Completion> for Completion {
+ fn from(value: extension::Completion) -> Self {
+ Self {
+ label: value.label,
+ label_details: value.label_details.map(Into::into),
+ detail: value.detail,
+ kind: value.kind.map(Into::into),
+ insert_text_format: value.insert_text_format.map(Into::into),
+ }
+ }
+}
+
+impl From<extension::CompletionLabelDetails> for CompletionLabelDetails {
+ fn from(value: extension::CompletionLabelDetails) -> Self {
+ Self {
+ detail: value.detail,
+ description: value.description,
+ }
+ }
+}
+
+impl From<extension::CompletionKind> for CompletionKind {
+ fn from(value: extension::CompletionKind) -> Self {
+ match value {
+ extension::CompletionKind::Text => Self::Text,
+ extension::CompletionKind::Method => Self::Method,
+ extension::CompletionKind::Function => Self::Function,
+ extension::CompletionKind::Constructor => Self::Constructor,
+ extension::CompletionKind::Field => Self::Field,
+ extension::CompletionKind::Variable => Self::Variable,
+ extension::CompletionKind::Class => Self::Class,
+ extension::CompletionKind::Interface => Self::Interface,
+ extension::CompletionKind::Module => Self::Module,
+ extension::CompletionKind::Property => Self::Property,
+ extension::CompletionKind::Unit => Self::Unit,
+ extension::CompletionKind::Value => Self::Value,
+ extension::CompletionKind::Enum => Self::Enum,
+ extension::CompletionKind::Keyword => Self::Keyword,
+ extension::CompletionKind::Snippet => Self::Snippet,
+ extension::CompletionKind::Color => Self::Color,
+ extension::CompletionKind::File => Self::File,
+ extension::CompletionKind::Reference => Self::Reference,
+ extension::CompletionKind::Folder => Self::Folder,
+ extension::CompletionKind::EnumMember => Self::EnumMember,
+ extension::CompletionKind::Constant => Self::Constant,
+ extension::CompletionKind::Struct => Self::Struct,
+ extension::CompletionKind::Event => Self::Event,
+ extension::CompletionKind::Operator => Self::Operator,
+ extension::CompletionKind::TypeParameter => Self::TypeParameter,
+ extension::CompletionKind::Other(value) => Self::Other(value),
+ }
+ }
+}
+
+impl From<extension::InsertTextFormat> for InsertTextFormat {
+ fn from(value: extension::InsertTextFormat) -> Self {
+ match value {
+ extension::InsertTextFormat::PlainText => Self::PlainText,
+ extension::InsertTextFormat::Snippet => Self::Snippet,
+ extension::InsertTextFormat::Other(value) => Self::Other(value),
+ }
+ }
+}
+
+impl From<extension::Symbol> for Symbol {
+ fn from(value: extension::Symbol) -> Self {
+ Self {
+ kind: value.kind.into(),
+ name: value.name,
+ }
+ }
+}
+
+impl From<extension::SymbolKind> for SymbolKind {
+ fn from(value: extension::SymbolKind) -> Self {
+ match value {
+ extension::SymbolKind::File => Self::File,
+ extension::SymbolKind::Module => Self::Module,
+ extension::SymbolKind::Namespace => Self::Namespace,
+ extension::SymbolKind::Package => Self::Package,
+ extension::SymbolKind::Class => Self::Class,
+ extension::SymbolKind::Method => Self::Method,
+ extension::SymbolKind::Property => Self::Property,
+ extension::SymbolKind::Field => Self::Field,
+ extension::SymbolKind::Constructor => Self::Constructor,
+ extension::SymbolKind::Enum => Self::Enum,
+ extension::SymbolKind::Interface => Self::Interface,
+ extension::SymbolKind::Function => Self::Function,
+ extension::SymbolKind::Variable => Self::Variable,
+ extension::SymbolKind::Constant => Self::Constant,
+ extension::SymbolKind::String => Self::String,
+ extension::SymbolKind::Number => Self::Number,
+ extension::SymbolKind::Boolean => Self::Boolean,
+ extension::SymbolKind::Array => Self::Array,
+ extension::SymbolKind::Object => Self::Object,
+ extension::SymbolKind::Key => Self::Key,
+ extension::SymbolKind::Null => Self::Null,
+ extension::SymbolKind::EnumMember => Self::EnumMember,
+ extension::SymbolKind::Struct => Self::Struct,
+ extension::SymbolKind::Event => Self::Event,
+ extension::SymbolKind::Operator => Self::Operator,
+ extension::SymbolKind::TypeParameter => Self::TypeParameter,
+ extension::SymbolKind::Other(value) => Self::Other(value),
+ }
+ }
+}
+
+impl From<extension::SlashCommand> for SlashCommand {
+ fn from(value: extension::SlashCommand) -> Self {
+ Self {
+ name: value.name,
+ description: value.description,
+ tooltip_text: value.tooltip_text,
+ requires_argument: value.requires_argument,
+ }
+ }
+}
+
+impl From<SlashCommandOutput> for extension::SlashCommandOutput {
+ fn from(value: SlashCommandOutput) -> Self {
+ Self {
+ text: value.text,
+ sections: value.sections.into_iter().map(Into::into).collect(),
+ }
+ }
+}
+
+impl From<SlashCommandOutputSection> for extension::SlashCommandOutputSection {
+ fn from(value: SlashCommandOutputSection) -> Self {
+ Self {
+ range: value.range.start as usize..value.range.end as usize,
+ label: value.label,
+ }
+ }
+}
+
+impl From<SlashCommandArgumentCompletion> for extension::SlashCommandArgumentCompletion {
+ fn from(value: SlashCommandArgumentCompletion) -> Self {
+ Self {
+ label: value.label,
+ new_text: value.new_text,
+ run_command: value.run_command,
+ }
+ }
+}
+
+impl TryFrom<ContextServerConfiguration> for extension::ContextServerConfiguration {
+ type Error = anyhow::Error;
+
+ fn try_from(value: ContextServerConfiguration) -> Result<Self, Self::Error> {
+ let settings_schema: serde_json::Value = serde_json::from_str(&value.settings_schema)
+ .context("Failed to parse settings_schema")?;
+
+ Ok(Self {
+ installation_instructions: value.installation_instructions,
+ default_settings: value.default_settings,
+ settings_schema,
+ })
+ }
+}
+
+impl HostKeyValueStore for WasmState {
+ async fn insert(
+ &mut self,
+ kv_store: Resource<ExtensionKeyValueStore>,
+ key: String,
+ value: String,
+ ) -> wasmtime::Result<Result<(), String>> {
+ let kv_store = self.table.get(&kv_store)?;
+ kv_store.insert(key, value).await.to_wasmtime_result()
+ }
+
+ async fn drop(&mut self, _worktree: Resource<ExtensionKeyValueStore>) -> Result<()> {
+ // We only ever hand out borrows of key-value stores.
+ Ok(())
+ }
+}
+
+impl HostProject for WasmState {
+ async fn worktree_ids(
+ &mut self,
+ project: Resource<ExtensionProject>,
+ ) -> wasmtime::Result<Vec<u64>> {
+ let project = self.table.get(&project)?;
+ Ok(project.worktree_ids())
+ }
+
+ async fn drop(&mut self, _project: Resource<Project>) -> Result<()> {
+ // We only ever hand out borrows of projects.
+ Ok(())
+ }
+}
+
+impl HostWorktree for WasmState {
+ async fn id(&mut self, delegate: Resource<Arc<dyn WorktreeDelegate>>) -> wasmtime::Result<u64> {
+ let delegate = self.table.get(&delegate)?;
+ Ok(delegate.id())
+ }
+
+ async fn root_path(
+ &mut self,
+ delegate: Resource<Arc<dyn WorktreeDelegate>>,
+ ) -> wasmtime::Result<String> {
+ let delegate = self.table.get(&delegate)?;
+ Ok(delegate.root_path())
+ }
+
+ async fn read_text_file(
+ &mut self,
+ delegate: Resource<Arc<dyn WorktreeDelegate>>,
+ path: String,
+ ) -> wasmtime::Result<Result<String, String>> {
+ let delegate = self.table.get(&delegate)?;
+ Ok(delegate
+ .read_text_file(&RelPath::new(Path::new(&path), PathStyle::Posix)?)
+ .await
+ .map_err(|error| error.to_string()))
+ }
+
+ async fn shell_env(
+ &mut self,
+ delegate: Resource<Arc<dyn WorktreeDelegate>>,
+ ) -> wasmtime::Result<EnvVars> {
+ let delegate = self.table.get(&delegate)?;
+ Ok(delegate.shell_env().await.into_iter().collect())
+ }
+
+ async fn which(
+ &mut self,
+ delegate: Resource<Arc<dyn WorktreeDelegate>>,
+ binary_name: String,
+ ) -> wasmtime::Result<Option<String>> {
+ let delegate = self.table.get(&delegate)?;
+ Ok(delegate.which(binary_name).await)
+ }
+
+ async fn drop(&mut self, _worktree: Resource<Worktree>) -> Result<()> {
+ // We only ever hand out borrows of worktrees.
+ Ok(())
+ }
+}
+
+impl common::Host for WasmState {}
+
+impl http_client::Host for WasmState {
+    /// Performs a buffered HTTP request on behalf of the guest.
+    ///
+    /// The whole response body is read into memory; 4xx/5xx statuses are
+    /// reported as errors rather than returned to the extension.
+    async fn fetch(
+        &mut self,
+        request: http_client::HttpRequest,
+    ) -> wasmtime::Result<Result<http_client::HttpResponse, String>> {
+        maybe!(async {
+            let url = &request.url;
+            let request = convert_request(&request)?;
+            let mut response = self.host.http_client.send(request).await?;
+
+            if response.status().is_client_error() || response.status().is_server_error() {
+                bail!("failed to fetch '{url}': status code {}", response.status())
+            }
+            convert_response(&mut response).await
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    /// Starts a streaming HTTP request and hands the guest a stream resource.
+    ///
+    /// NOTE(review): unlike `fetch`, this does not reject 4xx/5xx responses —
+    /// status handling is left entirely to the extension. Confirm the
+    /// asymmetry is intentional.
+    async fn fetch_stream(
+        &mut self,
+        request: http_client::HttpRequest,
+    ) -> wasmtime::Result<Result<Resource<ExtensionHttpResponseStream>, String>> {
+        let request = convert_request(&request)?;
+        let response = self.host.http_client.send(request);
+        maybe!(async {
+            let response = response.await?;
+            let stream = Arc::new(Mutex::new(response));
+            let resource = self.table.push(stream)?;
+            Ok(resource)
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+impl http_client::HostHttpResponseStream for WasmState {
+    /// Reads the next chunk (up to 8 KiB) from a streaming response body.
+    /// Returns `Ok(None)` once the body is exhausted.
+    async fn next_chunk(
+        &mut self,
+        resource: Resource<ExtensionHttpResponseStream>,
+    ) -> wasmtime::Result<Result<Option<Vec<u8>>, String>> {
+        // Clone the Arc so the table borrow ends before awaiting the lock.
+        let stream = self.table.get(&resource)?.clone();
+        maybe!(async move {
+            let mut response = stream.lock().await;
+            let mut buffer = vec![0; 8192]; // 8KB buffer
+            let bytes_read = response.body_mut().read(&mut buffer).await?;
+            if bytes_read == 0 {
+                Ok(None)
+            } else {
+                buffer.truncate(bytes_read);
+                Ok(Some(buffer))
+            }
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    /// Drops the host-side stream resource when the guest releases its handle.
+    async fn drop(&mut self, _resource: Resource<ExtensionHttpResponseStream>) -> Result<()> {
+        Ok(())
+    }
+}
+
+// Maps the WIT HTTP-method enum onto the host client's method type, one-to-one.
+impl From<http_client::HttpMethod> for ::http_client::Method {
+    fn from(value: http_client::HttpMethod) -> Self {
+        match value {
+            http_client::HttpMethod::Get => Self::GET,
+            http_client::HttpMethod::Post => Self::POST,
+            http_client::HttpMethod::Put => Self::PUT,
+            http_client::HttpMethod::Delete => Self::DELETE,
+            http_client::HttpMethod::Head => Self::HEAD,
+            http_client::HttpMethod::Options => Self::OPTIONS,
+            http_client::HttpMethod::Patch => Self::PATCH,
+        }
+    }
+}
+
+/// Translates a guest `HttpRequest` into a host request, preserving method,
+/// URL, redirect policy, headers, and the (optional) body.
+fn convert_request(
+    extension_request: &http_client::HttpRequest,
+) -> anyhow::Result<::http_client::Request<AsyncBody>> {
+    let mut request = ::http_client::Request::builder()
+        .method(::http_client::Method::from(extension_request.method))
+        .uri(&extension_request.url)
+        .follow_redirects(match extension_request.redirect_policy {
+            http_client::RedirectPolicy::NoFollow => ::http_client::RedirectPolicy::NoFollow,
+            http_client::RedirectPolicy::FollowLimit(limit) => {
+                ::http_client::RedirectPolicy::FollowLimit(limit)
+            }
+            http_client::RedirectPolicy::FollowAll => ::http_client::RedirectPolicy::FollowAll,
+        });
+    for (key, value) in &extension_request.headers {
+        request = request.header(key, value);
+    }
+    // A missing guest body becomes an empty `AsyncBody`.
+    let body = extension_request
+        .body
+        .clone()
+        .map(AsyncBody::from)
+        .unwrap_or_default();
+    request.body(body).map_err(anyhow::Error::from)
+}
+
+/// Buffers a host HTTP response into the guest's `HttpResponse` representation.
+///
+/// NOTE(review): header values that are not valid visible-ASCII are replaced
+/// with an empty string (`to_str().unwrap_or("")`) — lossy; confirm intended.
+async fn convert_response(
+    response: &mut ::http_client::Response<AsyncBody>,
+) -> anyhow::Result<http_client::HttpResponse> {
+    let mut extension_response = http_client::HttpResponse {
+        body: Vec::new(),
+        headers: Vec::new(),
+    };
+
+    for (key, value) in response.headers() {
+        extension_response
+            .headers
+            .push((key.to_string(), value.to_str().unwrap_or("").to_string()));
+    }
+
+    response
+        .body_mut()
+        .read_to_end(&mut extension_response.body)
+        .await?;
+
+    Ok(extension_response)
+}
+
+impl nodejs::Host for WasmState {
+    /// Returns the path to the managed Node.js binary.
+    ///
+    /// NOTE(review): `to_string_lossy` silently mangles non-UTF-8 paths —
+    /// confirm that is acceptable for all supported platforms.
+    async fn node_binary_path(&mut self) -> wasmtime::Result<Result<String, String>> {
+        self.host
+            .node_runtime
+            .binary_path()
+            .await
+            .map(|path| path.to_string_lossy().into_owned())
+            .to_wasmtime_result()
+    }
+
+    /// Queries the npm registry for the latest published version of a package.
+    async fn npm_package_latest_version(
+        &mut self,
+        package_name: String,
+    ) -> wasmtime::Result<Result<String, String>> {
+        self.host
+            .node_runtime
+            .npm_package_latest_version(&package_name)
+            .await
+            .to_wasmtime_result()
+    }
+
+    /// Reports the installed version of a package, relative to this
+    /// extension's work directory (`None` when not installed there).
+    async fn npm_package_installed_version(
+        &mut self,
+        package_name: String,
+    ) -> wasmtime::Result<Result<Option<String>, String>> {
+        self.host
+            .node_runtime
+            .npm_package_installed_version(&self.work_dir(), &package_name)
+            .await
+            .to_wasmtime_result()
+    }
+
+    /// Installs a specific package version into the extension's work dir.
+    /// The package must be allowed by the extension's install capability.
+    async fn npm_install_package(
+        &mut self,
+        package_name: String,
+        version: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        self.capability_granter
+            .grant_npm_install_package(&package_name)?;
+
+        self.host
+            .node_runtime
+            .npm_install_packages(&self.work_dir(), &[(&package_name, &version)])
+            .await
+            .to_wasmtime_result()
+    }
+}
+
+#[async_trait]
+impl lsp::Host for WasmState {}
+
+// Conversions from the host GitHub API types into the WIT view exposed to
+// extensions.
+impl From<::http_client::github::GithubRelease> for github::GithubRelease {
+    fn from(value: ::http_client::github::GithubRelease) -> Self {
+        Self {
+            // Extensions see the git tag name as the release "version".
+            version: value.tag_name,
+            assets: value.assets.into_iter().map(Into::into).collect(),
+        }
+    }
+}
+
+impl From<::http_client::github::GithubReleaseAsset> for github::GithubReleaseAsset {
+    fn from(value: ::http_client::github::GithubReleaseAsset) -> Self {
+        Self {
+            name: value.name,
+            download_url: value.browser_download_url,
+        }
+    }
+}
+
+impl github::Host for WasmState {
+    /// Fetches the newest release of `repo`, honoring the caller's
+    /// require-assets and pre-release options.
+    async fn latest_github_release(
+        &mut self,
+        repo: String,
+        options: github::GithubReleaseOptions,
+    ) -> wasmtime::Result<Result<github::GithubRelease, String>> {
+        maybe!(async {
+            let release = ::http_client::github::latest_github_release(
+                &repo,
+                options.require_assets,
+                options.pre_release,
+                self.host.http_client.clone(),
+            )
+            .await?;
+            Ok(release.into())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    /// Fetches a specific release of `repo` by its git tag name.
+    async fn github_release_by_tag_name(
+        &mut self,
+        repo: String,
+        tag: String,
+    ) -> wasmtime::Result<Result<github::GithubRelease, String>> {
+        maybe!(async {
+            let release = ::http_client::github::get_release_by_tag_name(
+                &repo,
+                &tag,
+                self.host.http_client.clone(),
+            )
+            .await?;
+            Ok(release.into())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+impl platform::Host for WasmState {
+    /// Reports the host OS and CPU architecture to the extension.
+    ///
+    /// Returns an error (rather than panicking, which would abort the host
+    /// call instead of surfacing a trappable failure) when built for a
+    /// platform the WIT enums cannot represent.
+    async fn current_platform(&mut self) -> Result<(platform::Os, platform::Architecture)> {
+        Ok((
+            match env::consts::OS {
+                "macos" => platform::Os::Mac,
+                "linux" => platform::Os::Linux,
+                "windows" => platform::Os::Windows,
+                other => bail!("unsupported os: {other}"),
+            },
+            match env::consts::ARCH {
+                "aarch64" => platform::Architecture::Aarch64,
+                "x86" => platform::Architecture::X86,
+                "x86_64" => platform::Architecture::X8664,
+                other => bail!("unsupported architecture: {other}"),
+            },
+        ))
+    }
+}
+
+// Converts a finished process's output into the WIT record.
+impl From<std::process::Output> for process::Output {
+    fn from(output: std::process::Output) -> Self {
+        Self {
+            // `code()` is `None` when the process was terminated by a signal
+            // (Unix), so the guest sees an absent status in that case.
+            status: output.status.code(),
+            stdout: output.stdout,
+            stderr: output.stderr,
+        }
+    }
+}
+
+impl process::Host for WasmState {
+    /// Runs an external command on behalf of the guest and returns its
+    /// captured output.
+    ///
+    /// The command and its arguments must be allowed by the extension's exec
+    /// capability. NOTE(review): the child appears to inherit the host's
+    /// environment in addition to `command.env` — confirm that is intended.
+    async fn run_command(
+        &mut self,
+        command: process::Command,
+    ) -> wasmtime::Result<Result<process::Output, String>> {
+        maybe!(async {
+            self.capability_granter
+                .grant_exec(&command.command, &command.args)?;
+
+            let output = util::command::new_smol_command(command.command.as_str())
+                .args(&command.args)
+                .envs(command.env)
+                .output()
+                .await?;
+
+            Ok(output.into())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+#[async_trait]
+impl slash_command::Host for WasmState {}
+
+#[async_trait]
+impl context_server::Host for WasmState {}
+
+impl dap::Host for WasmState {
+    /// Resolves a debug-adapter TCP connection template (possibly with a
+    /// missing port) into concrete connection arguments.
+    ///
+    /// The host address crosses the WASM boundary as the raw `u32` bit
+    /// pattern of an `Ipv4Addr` (`from_bits` / `to_bits`).
+    async fn resolve_tcp_template(
+        &mut self,
+        template: TcpArgumentsTemplate,
+    ) -> wasmtime::Result<Result<TcpArguments, String>> {
+        maybe!(async {
+            let (host, port, timeout) =
+                ::dap::configure_tcp_connection(task::TcpArgumentsTemplate {
+                    port: template.port,
+                    host: template.host.map(Ipv4Addr::from_bits),
+                    timeout: template.timeout,
+                })
+                .await?;
+            Ok(TcpArguments {
+                port,
+                host: host.to_bits(),
+                timeout,
+            })
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+impl ExtensionImports for WasmState {
+    /// Serializes a subset of the host's settings ("language", "lsp", or
+    /// "context_servers") to JSON for the guest.
+    ///
+    /// Runs on the main thread because settings access requires the `App`
+    /// context. Unknown categories are reported as errors.
+    async fn get_settings(
+        &mut self,
+        location: Option<self::SettingsLocation>,
+        category: String,
+        key: Option<String>,
+    ) -> wasmtime::Result<Result<String, String>> {
+        self.on_main_thread(|cx| {
+            async move {
+                // Only locations whose path parses as a POSIX rel-path are
+                // honored; otherwise global settings are used.
+                let path = location.as_ref().and_then(|location| {
+                    RelPath::new(Path::new(&location.path), PathStyle::Posix).ok()
+                });
+                let location = path
+                    .as_ref()
+                    .zip(location.as_ref())
+                    .map(|(path, location)| ::settings::SettingsLocation {
+                        worktree_id: WorktreeId::from_proto(location.worktree_id),
+                        path,
+                    });
+
+                cx.update(|cx| match category.as_str() {
+                    // Expose only a curated subset of each settings type.
+                    "language" => {
+                        let key = key.map(|k| LanguageName::new(&k));
+                        let settings = AllLanguageSettings::get(location, cx).language(
+                            location,
+                            key.as_ref(),
+                            cx,
+                        );
+                        Ok(serde_json::to_string(&settings::LanguageSettings {
+                            tab_size: settings.tab_size,
+                        })?)
+                    }
+                    "lsp" => {
+                        let settings = key
+                            .and_then(|key| {
+                                ProjectSettings::get(location, cx)
+                                    .lsp
+                                    .get(&::lsp::LanguageServerName::from_proto(key))
+                            })
+                            .cloned()
+                            .unwrap_or_default();
+                        Ok(serde_json::to_string(&settings::LspSettings {
+                            binary: settings.binary.map(|binary| settings::CommandSettings {
+                                path: binary.path,
+                                arguments: binary.arguments,
+                                env: binary.env.map(|env| env.into_iter().collect()),
+                            }),
+                            settings: settings.settings,
+                            initialization_options: settings.initialization_options,
+                        })?)
+                    }
+                    "context_servers" => {
+                        let settings = key
+                            .and_then(|key| {
+                                ProjectSettings::get(location, cx)
+                                    .context_servers
+                                    .get(key.as_str())
+                            })
+                            .cloned()
+                            .unwrap_or_else(|| {
+                                project::project_settings::ContextServerSettings::default_extension(
+                                )
+                            });
+
+                        match settings {
+                            project::project_settings::ContextServerSettings::Stdio {
+                                enabled: _,
+                                command,
+                            } => Ok(serde_json::to_string(&settings::ContextServerSettings {
+                                command: Some(settings::CommandSettings {
+                                    path: command.path.to_str().map(|path| path.to_string()),
+                                    arguments: Some(command.args),
+                                    env: command.env.map(|env| env.into_iter().collect()),
+                                }),
+                                settings: None,
+                            })?),
+                            project::project_settings::ContextServerSettings::Extension {
+                                enabled: _,
+                                settings,
+                            } => Ok(serde_json::to_string(&settings::ContextServerSettings {
+                                command: None,
+                                settings: Some(settings),
+                            })?),
+                            // The v0.6.0 WIT schema has no representation for
+                            // remote (HTTP) context servers.
+                            project::project_settings::ContextServerSettings::Http { .. } => {
+                                bail!("remote context server settings not supported in 0.6.0")
+                            }
+                        }
+                    }
+                    _ => {
+                        bail!("Unknown settings category: {}", category);
+                    }
+                })
+            }
+            .boxed_local()
+        })
+        .await?
+        .to_wasmtime_result()
+    }
+
+    /// Forwards a language-server installation status from the extension to
+    /// the host proxy so it can be surfaced (e.g. in the UI).
+    async fn set_language_server_installation_status(
+        &mut self,
+        server_name: String,
+        status: LanguageServerInstallationStatus,
+    ) -> wasmtime::Result<()> {
+        // Translate the WIT status enum into the host's `BinaryStatus`.
+        let status = match status {
+            LanguageServerInstallationStatus::CheckingForUpdate => BinaryStatus::CheckingForUpdate,
+            LanguageServerInstallationStatus::Downloading => BinaryStatus::Downloading,
+            LanguageServerInstallationStatus::None => BinaryStatus::None,
+            LanguageServerInstallationStatus::Failed(error) => BinaryStatus::Failed { error },
+        };
+
+        self.host
+            .proxy
+            .update_language_server_status(::lsp::LanguageServerName(server_name.into()), status);
+
+        Ok(())
+    }
+
+    /// Downloads `url` into the extension's sandboxed work directory,
+    /// decompressing/extracting according to `file_type`.
+    ///
+    /// The URL must be allowed by the extension's download capability, and
+    /// the destination path is constrained by `writeable_path_from_extension`.
+    async fn download_file(
+        &mut self,
+        url: String,
+        path: String,
+        file_type: DownloadedFileType,
+    ) -> wasmtime::Result<Result<(), String>> {
+        maybe!(async {
+            let parsed_url = Url::parse(&url)?;
+            self.capability_granter.grant_download_file(&parsed_url)?;
+
+            let path = PathBuf::from(path);
+            let extension_work_dir = self.host.work_dir.join(self.manifest.id.as_ref());
+
+            self.host.fs.create_dir(&extension_work_dir).await?;
+
+            let destination_path = self
+                .host
+                .writeable_path_from_extension(&self.manifest.id, &path)?;
+
+            let mut response = self
+                .host
+                .http_client
+                .get(&url, Default::default(), true)
+                .await
+                .context("downloading release")?;
+
+            anyhow::ensure!(
+                response.status().is_success(),
+                "download failed with status {}",
+                response.status()
+            );
+            let body = BufReader::new(response.body_mut());
+
+            // Stream the body straight to disk, decoding per the declared
+            // file type; tar/zip archives are extracted in place.
+            match file_type {
+                DownloadedFileType::Uncompressed => {
+                    futures::pin_mut!(body);
+                    self.host
+                        .fs
+                        .create_file_with(&destination_path, body)
+                        .await?;
+                }
+                DownloadedFileType::Gzip => {
+                    let body = GzipDecoder::new(body);
+                    futures::pin_mut!(body);
+                    self.host
+                        .fs
+                        .create_file_with(&destination_path, body)
+                        .await?;
+                }
+                DownloadedFileType::GzipTar => {
+                    let body = GzipDecoder::new(body);
+                    futures::pin_mut!(body);
+                    self.host
+                        .fs
+                        .extract_tar_file(&destination_path, Archive::new(body))
+                        .await?;
+                }
+                DownloadedFileType::Zip => {
+                    futures::pin_mut!(body);
+                    extract_zip(&destination_path, body)
+                        .await
+                        .with_context(|| format!("unzipping {path:?} archive"))?;
+                }
+            }
+
+            Ok(())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    /// Marks a file as executable. The path is first validated to lie inside
+    /// the extension's writable directory.
+    async fn make_file_executable(&mut self, path: String) -> wasmtime::Result<Result<(), String>> {
+        let path = self
+            .host
+            .writeable_path_from_extension(&self.manifest.id, Path::new(&path))?;
+
+        make_file_executable(&path)
+            .await
+            .with_context(|| format!("setting permissions for path {path:?}"))
+            .to_wasmtime_result()
+    }
+
+ // =========================================================================
+ // LLM Provider Import Implementations
+ // =========================================================================
+
+    /// Stub for interactive credential prompting.
+    ///
+    /// Always reports `false` ("not provided") for now: extensions should
+    /// check `llm_get_env_var` first, then fall back to `llm_store_credential`
+    /// / `llm_get_credential` for manually-entered keys. Full UI prompting is
+    /// planned for a future phase.
+    async fn llm_request_credential(
+        &mut self,
+        _provider_id: String,
+        _credential_type: llm_provider::CredentialType,
+        _label: String,
+        _placeholder: String,
+    ) -> wasmtime::Result<Result<bool, String>> {
+        Ok(Ok(false))
+    }
+
+    /// Reads a previously stored credential for `provider_id`.
+    ///
+    /// Keys are namespaced as `{extension_id}:{provider_id}` so one extension
+    /// cannot read another's credentials.
+    ///
+    /// NOTE(review): keyring read errors are swallowed (`.ok().flatten()`),
+    /// making "read failed" indistinguishable from "no credential stored" —
+    /// consider logging the error.
+    async fn llm_get_credential(
+        &mut self,
+        provider_id: String,
+    ) -> wasmtime::Result<Option<String>> {
+        let extension_id = self.manifest.id.clone();
+        let credential_key = format!("{}:{}", extension_id, provider_id);
+
+        self.on_main_thread(move |cx| {
+            async move {
+                let task = cx.update(|cx| cx.read_credentials(&credential_key))?;
+                let result = task.await.ok().flatten();
+                // Stored bytes are decoded lossily back into a string.
+                Ok(result.map(|(_, password)| String::from_utf8_lossy(&password).to_string()))
+            }
+            .boxed_local()
+        })
+        .await
+    }
+
+    /// Stores a credential for `provider_id` in the OS keychain, namespaced
+    /// as `{extension_id}:{provider_id}`.
+    ///
+    /// The keychain "username" is fixed to `"api_key"`; only the secret value
+    /// varies per provider.
+    async fn llm_store_credential(
+        &mut self,
+        provider_id: String,
+        value: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        let extension_id = self.manifest.id.clone();
+        let credential_key = format!("{}:{}", extension_id, provider_id);
+
+        self.on_main_thread(move |cx| {
+            async move {
+                let task = cx.update(|cx| {
+                    cx.write_credentials(&credential_key, "api_key", value.as_bytes())
+                })?;
+                task.await.map_err(|e| anyhow::anyhow!("{}", e))
+            }
+            .boxed_local()
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    /// Deletes the stored credential for `provider_id` (namespaced as
+    /// `{extension_id}:{provider_id}`) from the OS keychain.
+    async fn llm_delete_credential(
+        &mut self,
+        provider_id: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        let extension_id = self.manifest.id.clone();
+        let credential_key = format!("{}:{}", extension_id, provider_id);
+
+        self.on_main_thread(move |cx| {
+            async move {
+                let task = cx.update(|cx| cx.delete_credentials(&credential_key))?;
+                task.await.map_err(|e| anyhow::anyhow!("{}", e))
+            }
+            .boxed_local()
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    /// Returns the value of a host environment variable, if set and valid UTF-8.
+    ///
+    /// NOTE(review): this exposes *arbitrary* host environment variables to
+    /// any extension, with no capability check (unlike exec / download /
+    /// npm-install above) — confirm this unrestricted access is intended.
+    async fn llm_get_env_var(&mut self, name: String) -> wasmtime::Result<Option<String>> {
+        Ok(env::var(&name).ok())
+    }
+}
+
+// =============================================================================
+// LLM Provider Host Implementations
+// =============================================================================
+
+impl llm_provider::Host for WasmState {}
@@ -28,6 +28,7 @@ convert_case.workspace = true
copilot.workspace = true
credentials_provider.workspace = true
deepseek = { workspace = true, features = ["schemars"] }
+extension.workspace = true
fs.workspace = true
futures.workspace = true
google_ai = { workspace = true, features = ["schemars"] }
@@ -0,0 +1,33 @@
+use extension::{ExtensionLanguageModelProviderProxy, LanguageModelProviderRegistration};
+use gpui::{App, Entity};
+use language_model::{LanguageModelProviderId, LanguageModelRegistry};
+use std::sync::Arc;
+
+/// Bridges extension (un)loading to the `LanguageModelRegistry`: registration
+/// runs a closure built by `extension_host`; unregistration happens here.
+pub struct ExtensionLanguageModelProxy {
+    registry: Entity<LanguageModelRegistry>, // retained only for unregistration
+}
+
+impl ExtensionLanguageModelProxy {
+    pub fn new(registry: Entity<LanguageModelRegistry>) -> Self {
+        Self { registry }
+    }
+}
+
+impl ExtensionLanguageModelProviderProxy for ExtensionLanguageModelProxy {
+    fn register_language_model_provider(
+        &self,
+        _provider_id: Arc<str>, // unused: the id is baked into `register_fn`
+        register_fn: LanguageModelProviderRegistration,
+        cx: &mut App,
+    ) {
+        register_fn(cx); // the closure constructs and registers the provider
+    }
+
+    fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App) {
+        self.registry.update(cx, |registry, cx| {
+            registry.unregister_provider(LanguageModelProviderId::from(provider_id), cx);
+        });
+    }
+}
@@ -1,5 +1,6 @@
use std::sync::Arc;
+use ::extension::ExtensionHostProxy;
use ::settings::{Settings, SettingsStore};
use client::{Client, UserStore};
use collections::HashSet;
@@ -8,6 +9,7 @@ use language_model::{LanguageModelProviderId, LanguageModelRegistry};
use provider::deepseek::DeepSeekLanguageModelProvider;
mod api_key;
+mod extension;
pub mod provider;
mod settings;
pub mod ui;
@@ -33,6 +35,12 @@ pub fn init(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) {
register_language_model_providers(registry, user_store, client.clone(), cx);
});
+ // Register the extension language model provider proxy
+ let extension_proxy = ExtensionHostProxy::default_global(cx);
+ extension_proxy.register_language_model_provider_proxy(
+ extension::ExtensionLanguageModelProxy::new(registry.clone()),
+ );
+
let mut openai_compatible_providers = AllLanguageModelSettings::get_global(cx)
.openai_compatible
.keys()
@@ -0,0 +1,689 @@
+# Language Model Provider Extensions - Implementation Guide
+
+## Purpose
+
+This document provides a detailed guide for completing the implementation of Language Model Provider Extensions in Zed. It explains what has been done, what remains, and how to complete the work.
+
+For the full design and rationale, see [language_model_provider_extensions_plan.md](./language_model_provider_extensions_plan.md).
+
+## Core Design Principle
+
+**Extensions handle ALL provider-specific logic.** This means:
+- Thought signatures (Anthropic)
+- Reasoning effort parameters (OpenAI o-series)
+- Cache control markers
+- Parallel tool calls
+- SSE/streaming format parsing
+- Any other provider-specific features
+
+Zed's core should have **zero knowledge** of these details. The extension API must be generic enough that extensions can implement any provider without Zed changes.
+
+---
+
+## Current Status: STREAMING API COMPLETE ✅
+
+The core plumbing and streaming API are now complete. Extensions can:
+1. Declare LLM providers in their manifest
+2. Be queried for providers and models at load time
+3. Have their providers registered with the `LanguageModelRegistry`
+4. Have their providers unregistered when the extension is unloaded
+5. Stream completions using the new polling-based API
+
+**What's NOT done yet:**
+- Credential UI prompt support (`llm_request_credential` returns false)
+- Model refresh mechanism
+- A working test extension that demonstrates the feature (requires WASM build)
+- End-to-end testing with a real extension
+
+---
+
+## What Has Been Completed
+
+### 1. WIT Interface Definition ✅
+
+**Location:** `crates/extension_api/wit/since_v0.7.0/`
+
+Created all WIT files for v0.7.0:
+- `llm-provider.wit` - Core LLM types (ProviderInfo, ModelInfo, CompletionRequest, CompletionEvent, etc.)
+- `extension.wit` - Updated with LLM exports/imports
+
+Key types in `llm-provider.wit`:
+```wit
+record provider-info {
+ id: string,
+ name: string,
+ icon: option<string>,
+}
+
+record model-info {
+ id: string,
+ name: string,
+ max-token-count: u64,
+ max-output-tokens: option<u64>,
+ capabilities: model-capabilities,
+ is-default: bool,
+ is-default-fast: bool,
+}
+
+variant completion-event {
+ started,
+ text(string),
+ thinking(thinking-content),
+ redacted-thinking(string),
+ tool-use(tool-use),
+ tool-use-json-parse-error(tool-use-json-parse-error),
+ stop(stop-reason),
+ usage(token-usage),
+ reasoning-details(string),
+}
+```
+
+Key exports in `extension.wit`:
+```wit
+export llm-providers: func() -> list<provider-info>;
+export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
+export llm-provider-is-authenticated: func(provider-id: string) -> bool;
+export llm-provider-authenticate: func(provider-id: string) -> result<_, string>;
+export llm-stream-completion-start: func(provider-id: string, model-id: string, request: completion-request) -> result<string, string>;
+export llm-stream-completion-next: func(stream-id: string) -> result<option<completion-event>, string>;
+export llm-stream-completion-close: func(stream-id: string);
+```
+
+Note: The streaming API uses a polling-based approach with explicit stream IDs instead of a resource handle.
+This avoids complexity with cross-boundary resource ownership in the WASM component model.
+
+Key imports in `extension.wit`:
+```wit
+import llm-get-credential: func(provider-id: string) -> option<string>;
+import llm-store-credential: func(provider-id: string, value: string) -> result<_, string>;
+import llm-delete-credential: func(provider-id: string) -> result<_, string>;
+import llm-get-env-var: func(name: string) -> option<string>;
+```
+
+### 2. Extension Manifest Changes ✅
+
+**Location:** `crates/extension/src/extension_manifest.rs`
+
+Added these types:
+```rust
+pub struct LanguageModelProviderManifestEntry {
+ pub name: String,
+ pub icon: Option<String>,
+ pub models: Vec<LanguageModelManifestEntry>,
+ pub auth: Option<LanguageModelAuthConfig>,
+}
+
+pub struct LanguageModelManifestEntry {
+ pub id: String,
+ pub name: String,
+ pub max_token_count: u64,
+ pub max_output_tokens: Option<u64>,
+ pub supports_images: bool,
+ pub supports_tools: bool,
+ pub supports_thinking: bool,
+}
+
+pub struct LanguageModelAuthConfig {
+ pub env_var: Option<String>,
+ pub credential_label: Option<String>,
+}
+```
+
+Added to `ExtensionManifest`:
+```rust
+pub language_model_providers: BTreeMap<Arc<str>, LanguageModelProviderManifestEntry>,
+```
+
+### 3. Host-Side Provider/Model Structs ✅
+
+**Location:** `crates/extension_host/src/wasm_host/llm_provider.rs`
+
+Created `ExtensionLanguageModelProvider` implementing `LanguageModelProvider`:
+- Wraps a `WasmExtension` and `LlmProviderInfo`
+- Delegates to extension calls for authentication, model listing, etc.
+- Returns `ExtensionLanguageModel` instances
+- Implements `LanguageModelProviderState` for UI observation
+
+Created `ExtensionLanguageModel` implementing `LanguageModel`:
+- Wraps extension + model info
+- Implements `stream_completion` by calling extension's `llm-stream-completion`
+- Converts between Zed's `LanguageModelRequest` and WIT's `CompletionRequest`
+- Handles streaming via polling-based approach with explicit stream IDs
+
+**Key implementation details:**
+- The `stream_completion` method uses a polling loop that calls `llm_stream_completion_start`, then repeatedly calls `llm_stream_completion_next` until the stream is complete, and finally calls `llm_stream_completion_close` to clean up
+- Credential storage uses gpui's `cx.read_credentials()`, `cx.write_credentials()`, and `cx.delete_credentials()` APIs
+- The `new()` method now accepts a `models: Vec<LlmModelInfo>` parameter to populate available models at registration time
+
+### 4. Extension Host Proxy ✅
+
+**Location:** `crates/extension/src/extension_host_proxy.rs`
+
+Added `ExtensionLanguageModelProviderProxy` trait:
+```rust
+pub type LanguageModelProviderRegistration = Box<dyn FnOnce(&mut App) + Send + Sync + 'static>;
+
+pub trait ExtensionLanguageModelProviderProxy: Send + Sync + 'static {
+ fn register_language_model_provider(
+ &self,
+ provider_id: Arc<str>,
+ register_fn: LanguageModelProviderRegistration,
+ cx: &mut App,
+ );
+
+ fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App);
+}
+```
+
+The proxy uses a boxed closure pattern. This allows `extension_host` to create the `ExtensionLanguageModelProvider` (which requires `WasmExtension`) and bake the registration logic into the closure; the `language_models` crate simply invokes that closure and handles unregistration directly against the registry.
+
+### 5. Proxy Implementation ✅
+
+**Location:** `crates/language_models/src/extension.rs`
+
+```rust
+pub struct ExtensionLanguageModelProxy {
+ registry: Entity<LanguageModelRegistry>,
+}
+
+impl ExtensionLanguageModelProviderProxy for ExtensionLanguageModelProxy {
+ fn register_language_model_provider(
+ &self,
+ _provider_id: Arc<str>,
+ register_fn: LanguageModelProviderRegistration,
+ cx: &mut App,
+ ) {
+ register_fn(cx);
+ }
+
+ fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App) {
+ self.registry.update(cx, |registry, cx| {
+ registry.unregister_provider(LanguageModelProviderId::from(provider_id), cx);
+ });
+ }
+}
+```
+
+The proxy is registered during `language_models::init()`.
+
+### 6. Extension Loading Wiring ✅
+
+**Location:** `crates/extension_host/src/extension_host.rs`
+
+In `extensions_updated()`:
+
+**Unloading (around line 1217):**
+```rust
+for provider_id in extension.manifest.language_model_providers.keys() {
+ let full_provider_id: Arc<str> = format!("{}:{}", extension_id, provider_id).into();
+ self.proxy.unregister_language_model_provider(full_provider_id, cx);
+}
+```
+
+**Loading (around line 1383):**
+After loading a wasm extension, we query for LLM providers and models:
+```rust
+if !extension.manifest.language_model_providers.is_empty() {
+ let providers_result = wasm_extension
+ .call(|ext, store| {
+ async move { ext.call_llm_providers(store).await }.boxed()
+ })
+ .await;
+
+ if let Ok(Ok(providers)) = providers_result {
+ for provider_info in providers {
+ // Query for models...
+ let models_result = wasm_extension.call(...).await;
+ // Store provider_info and models for registration
+ }
+ }
+}
+```
+
+Then during registration (around line 1511):
+```rust
+for (provider_info, models) in llm_providers_with_models {
+ let provider_id: Arc<str> = format!("{}:{}", manifest.id, provider_info.id).into();
+ this.proxy.register_language_model_provider(
+ provider_id,
+ Box::new(move |cx: &mut App| {
+ let provider = Arc::new(ExtensionLanguageModelProvider::new(
+ wasm_ext, pinfo, mods, cx,
+ ));
+ language_model::LanguageModelRegistry::global(cx).update(
+ cx,
+ |registry, cx| {
+ registry.register_provider(provider, cx);
+ },
+ );
+ }),
+ cx,
+ );
+}
+```
+
+### 7. Extension API Updates ✅
+
+**Location:** `crates/extension_api/src/extension_api.rs`
+
+- Updated `wit_bindgen::generate!` to use `./wit/since_v0.7.0`
+- Added LLM type re-exports (prefixed with `Llm` for clarity)
+- Added LLM methods to `Extension` trait with default implementations
+- Added `wit::Guest` implementations for LLM functions
+
+The default implementations ensure backward compatibility:
+```rust
+fn llm_providers(&self) -> Vec<LlmProviderInfo> {
+ Vec::new() // Extensions without LLM providers return empty
+}
+
+fn llm_provider_models(&self, _provider_id: &str) -> Result<Vec<LlmModelInfo>, String> {
+ Ok(Vec::new())
+}
+
+fn llm_stream_completion_start(&mut self, ...) -> Result<String, String> {
+    Err("`llm_stream_completion_start` not implemented".to_string())
+}
+fn llm_stream_completion_next(&mut self, _stream_id: &str) -> Result<Option<LlmCompletionEvent>, String> {
+    Err("`llm_stream_completion_next` not implemented".to_string())
+}
+fn llm_stream_completion_close(&mut self, _stream_id: &str) { /* no-op by default */ }
+```
+
+### 8. Test Files Updated ✅
+
+Added `language_model_providers: BTreeMap::default()` to all test manifests:
+- `crates/extension/src/extension_manifest.rs` (test module)
+- `crates/extension_host/src/extension_store_test.rs`
+- `crates/extension_host/src/capability_granter.rs` (test module)
+- `crates/extension_host/benches/extension_compilation_benchmark.rs`
+
+---
+
+## What Remains To Be Done
+
+### Task 1: Test the Streaming Completion Flow (HIGH PRIORITY) - ARCHITECTURE UPDATED ✅
+
+The streaming API has been updated to use a polling-based approach instead of a resource handle pattern.
+This was necessary because the original design had a fundamental issue: the `completion-stream` resource
+was defined in an imported interface but returned from an exported function, creating ownership ambiguity.
+
+**New API:**
+- `llm-stream-completion-start` - Returns a stream ID (string)
+- `llm-stream-completion-next` - Poll for the next event using the stream ID
+- `llm-stream-completion-close` - Clean up the stream when done
+
+**Still needs testing:**
+1. Create a test extension that implements a simple LLM provider
+2. Verify the polling-based streaming works correctly through the WASM boundary
+3. Test error handling and edge cases
+
+**Location to test:** `crates/extension_host/src/wasm_host/llm_provider.rs` - the `stream_completion` method on `ExtensionLanguageModel`.
+
+### Task 2: Credential UI Prompt Support (MEDIUM PRIORITY)
+
+**Location:** `crates/extension_host/src/wasm_host/wit/since_v0_7_0.rs`
+
+The `llm_request_credential` host function currently returns `Ok(Ok(false))`:
+```rust
+async fn llm_request_credential(
+ &mut self,
+ _provider_id: String,
+ _credential_type: llm_provider::CredentialType,
+ _label: String,
+ _placeholder: String,
+) -> wasmtime::Result<Result<bool, String>> {
+ // TODO: Implement actual UI prompting
+ Ok(Ok(false))
+}
+```
+
+**What needs to happen:**
+1. Show a dialog to the user asking for the credential
+2. Wait for user input
+3. Return `true` if provided, `false` if cancelled
+4. The extension can then use `llm_store_credential` to save it
+
+This requires UI work and async coordination with gpui windows.
+
+### Task 3: Handle Model Refresh (LOW PRIORITY - can be follow-up)
+
+Currently models are only queried once at registration time. Options for improvement:
+
+1. Add a refresh mechanism that re-queries `call_llm_provider_models`
+2. Add a notification mechanism where extensions can signal that models have changed
+3. Automatic refresh on authentication
+
+**Recommendation:** Start with refresh-on-authentication as a fast-follow.
+
+### Task 4: Create a Test Extension (LOW PRIORITY - but very useful)
+
+**Note:** Creating a working test extension requires building a WASM component, which needs:
+1. The `wasm32-wasip1` Rust target: `rustup target add wasm32-wasip1`
+2. Building with: `cargo build --target wasm32-wasip1 --release`
+3. The resulting `.wasm` file must be placed in the extension directory
+
+The existing `extensions/test-extension` has a pre-built WASM file checked in. To test LLM
+provider functionality, either:
+- Rebuild the test-extension WASM with LLM provider code
+- Create a new extension and build it locally
+
+Example test extension that demonstrates the LLM provider API:
+
+```
+extensions/test-llm-provider/
+├── extension.toml
+├── Cargo.toml
+└── src/
+ └── lib.rs
+```
+
+**extension.toml:**
+```toml
+id = "test-llm-provider"
+name = "Test LLM Provider"
+version = "0.1.0"
+schema_version = 1
+
+[language_model_providers.test-provider]
+name = "Test Provider"
+```
+
+**src/lib.rs:**
+```rust
+use zed_extension_api::{self as zed, *};
+
+use std::collections::HashMap;
+use std::sync::Mutex;
+
+struct TestExtension {
+ streams: Mutex<HashMap<String, Vec<LlmCompletionEvent>>>,
+ next_stream_id: Mutex<u64>,
+}
+
+impl zed::Extension for TestExtension {
+ fn new() -> Self {
+ Self {
+ streams: Mutex::new(HashMap::new()),
+ next_stream_id: Mutex::new(0),
+ }
+ }
+
+ fn llm_providers(&self) -> Vec<LlmProviderInfo> {
+ vec![LlmProviderInfo {
+ id: "test-provider".into(),
+ name: "Test Provider".into(),
+ icon: None,
+ }]
+ }
+
+ fn llm_provider_models(&self, _provider_id: &str) -> Result<Vec<LlmModelInfo>, String> {
+ Ok(vec![LlmModelInfo {
+ id: "test-model".into(),
+ name: "Test Model".into(),
+ max_token_count: 4096,
+ max_output_tokens: Some(1024),
+ capabilities: LlmModelCapabilities {
+ supports_images: false,
+ supports_tools: false,
+ supports_tool_choice_auto: false,
+ supports_tool_choice_any: false,
+ supports_tool_choice_none: false,
+ supports_thinking: false,
+ tool_input_format: LlmToolInputFormat::JsonSchema,
+ },
+ is_default: true,
+ is_default_fast: true,
+ }])
+ }
+
+ fn llm_stream_completion_start(
+ &mut self,
+ _provider_id: &str,
+ _model_id: &str,
+ _request: &LlmCompletionRequest,
+ ) -> Result<String, String> {
+ // Create a simple response with test events
+ let events = vec![
+ LlmCompletionEvent::Started,
+ LlmCompletionEvent::Text("Hello, ".into()),
+ LlmCompletionEvent::Text("world!".into()),
+ LlmCompletionEvent::Stop(LlmStopReason::EndTurn),
+ ];
+
+ let mut id = self.next_stream_id.lock().unwrap();
+ let stream_id = format!("stream-{}", *id);
+ *id += 1;
+
+ self.streams.lock().unwrap().insert(stream_id.clone(), events);
+ Ok(stream_id)
+ }
+
+ fn llm_stream_completion_next(
+ &mut self,
+ stream_id: &str,
+ ) -> Result<Option<LlmCompletionEvent>, String> {
+ let mut streams = self.streams.lock().unwrap();
+ if let Some(events) = streams.get_mut(stream_id) {
+ if events.is_empty() {
+ Ok(None)
+ } else {
+ Ok(Some(events.remove(0)))
+ }
+ } else {
+ Err(format!("Unknown stream: {}", stream_id))
+ }
+ }
+
+ fn llm_stream_completion_close(&mut self, stream_id: &str) {
+ self.streams.lock().unwrap().remove(stream_id);
+ }
+}
+
+zed::register_extension!(TestExtension);
+```
+
+---
+
+## File-by-File Checklist
+
+### Completed ✅
+
+- [x] `crates/extension_api/wit/since_v0.7.0/llm-provider.wit` - LLM types defined
+- [x] `crates/extension_api/wit/since_v0.7.0/extension.wit` - LLM exports/imports added
+- [x] `crates/extension_api/src/extension_api.rs` - Extension trait + Guest impl updated for v0.7.0
+- [x] `crates/extension/src/extension_manifest.rs` - Manifest types added
+- [x] `crates/extension/src/extension_host_proxy.rs` - Proxy trait added
+- [x] `crates/extension_host/src/wasm_host/llm_provider.rs` - Provider/Model structs created
+- [x] `crates/extension_host/src/wasm_host/wit.rs` - LLM types exported, Extension enum updated
+- [x] `crates/extension_host/src/wasm_host/wit/since_v0_7_0.rs` - Host trait implementations
+- [x] `crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs` - Rewritten to use latest types
+- [x] `crates/extension_host/src/extension_host.rs` - Wired up LLM provider registration/unregistration
+- [x] `crates/extension_host/Cargo.toml` - Dependencies added
+- [x] `crates/language_models/src/extension.rs` - Proxy implementation
+- [x] `crates/language_models/src/language_models.rs` - Proxy registration
+- [x] `crates/language_models/Cargo.toml` - Extension dependency added
+
+### Should Implement (Follow-up PRs)
+
+- [ ] `llm_request_credential` UI implementation
+- [ ] Model refresh mechanism
+- [ ] Test extension for validation
+- [ ] Documentation for extension authors
+
+---
+
+## Architecture Overview
+
+```
+┌─────────────────────────────────────────────────────────────────────┐
+│ Extension Host │
+│ ┌─────────────────────────────────────────────────────────────┐ │
+│ │ extensions_updated() │ │
+│ │ │ │
+│ │ 1. Load WasmExtension │ │
+│ │ 2. Query llm_providers() and llm_provider_models() │ │
+│ │ 3. Call proxy.register_language_model_provider() │ │
+│ └───────────────────────────┬───────────────────────────────────┘ │
+│ │ │
+│ ┌───────────────────────────▼───────────────────────────────────┐ │
+│ │ ExtensionLanguageModelProvider │ │
+│ │ - Wraps WasmExtension │ │
+│ │ - Implements LanguageModelProvider │ │
+│ │ - Creates ExtensionLanguageModel instances │ │
+│ └───────────────────────────┬───────────────────────────────────┘ │
+│ │ │
+│ ┌───────────────────────────▼───────────────────────────────────┐ │
+│ │ ExtensionLanguageModel │ │
+│ │ - Implements LanguageModel │ │
+│ │ - stream_completion() calls extension via WASM │ │
+│ └───────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────┘
+ │
+ │ Proxy (boxed closure)
+ ▼
+┌─────────────────────────────────────────────────────────────────────┐
+│ Language Models Crate │
+│ ┌───────────────────────────────────────────────────────────────┐ │
+│ │ ExtensionLanguageModelProxy │ │
+│ │ - Implements ExtensionLanguageModelProviderProxy │ │
+│ │ - Calls register_fn closure │ │
+│ │ - Unregisters from LanguageModelRegistry │ │
+│ └───────────────────────────┬───────────────────────────────────┘ │
+│ │ │
+│ ┌───────────────────────────▼───────────────────────────────────┐ │
+│ │ LanguageModelRegistry │ │
+│ │ - Stores all providers (built-in + extension) │ │
+│ │ - Provides models to UI │ │
+│ └───────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## Key Code Patterns
+
+### 1. Provider ID Format
+
+Provider IDs are formatted as `{extension_id}:{provider_id}` to ensure uniqueness:
+
+```rust
+let provider_id: Arc<str> = format!("{}:{}", manifest.id, provider_info.id).into();
+```
+
+### 2. Triple-Nested Result Handling
+
+When calling extension methods, results are nested:
+- Outer `Result`: from channel operations (anyhow error)
+- Middle `Result`: from WASM call (anyhow error)
+- Inner `Result<T, String>`: from extension logic
+
+```rust
+let models_result = wasm_extension.call(...).await;
+
+let models: Vec<LlmModelInfo> = match models_result {
+ Ok(Ok(Ok(models))) => models,
+ Ok(Ok(Err(e))) => { /* extension returned error */ }
+ Ok(Err(e)) => { /* WASM call failed */ }
+ Err(e) => { /* channel operation failed */ }
+};
+```
+
+### 3. Polling-Based Streaming Pattern
+
+The streaming API uses explicit stream IDs with polling instead of resource handles:
+
+```rust
+// Start the stream and get an ID
+let stream_id = ext.call_llm_stream_completion_start(store, provider_id, model_id, request).await?;
+
+// Poll for events in a loop
+loop {
+ match ext.call_llm_stream_completion_next(store, &stream_id).await? {
+ Ok(Some(event)) => { /* process event */ }
+ Ok(None) => break, // Stream complete
+ Err(e) => { /* handle error */ }
+ }
+}
+
+// Clean up
+ext.call_llm_stream_completion_close(store, &stream_id).await;
+```
+
+This pattern avoids the complexity of cross-boundary resource ownership in the WASM component model.
+
+### 4. Default Trait Implementations
+
+All LLM methods in the `Extension` trait have defaults so existing extensions continue to work:
+
+```rust
+fn llm_providers(&self) -> Vec<LlmProviderInfo> {
+ Vec::new() // No providers by default
+}
+```
+
+---
+
+## Common Pitfalls
+
+1. **Type confusion:** WIT bindgen creates NEW types for each version. `Completion` from v0.6.0 bindgen is different from v0.7.0. This is why we map older interfaces to `latest::`.
+
+2. **Import paths:** After `pub use self::zed::extension::*;`, types are available without prefix. Types in sub-interfaces (like `lsp::CompletionKind`) need explicit imports.
+
+3. **Async closures:** Extension calls use `extension.call(|ext, store| async move { ... }.boxed())` pattern. The closure must be `'static + Send`.
+
+4. **Stream ID management:** Extensions must track their active streams using the stream IDs returned from `llm_stream_completion_start`. The host will call `llm_stream_completion_close` when done.
+
+5. **Result nesting:** `extension.call(...)` wraps the closure's return type in `Result<T>`, so if the closure returns `Result<Result<X, String>>`, you get `Result<Result<Result<X, String>>>`. Unwrap carefully!
+
+6. **Proxy type boundaries:** The `extension` crate shouldn't depend on `extension_host`. The proxy trait uses a boxed closure to pass the registration logic without needing to share types.
+
+7. **Resource ownership in WIT:** Be careful when defining resources in imported interfaces but returning them from exported functions. This creates ownership ambiguity. The streaming API was changed to use polling to avoid this issue.
+
+---
+
+## Testing
+
+All existing tests pass:
+```bash
+cargo test -p extension_host --lib
+# 3 tests pass
+
+./script/clippy
+# No warnings
+```
+
+To test the full flow manually:
+1. Create a test extension with LLM provider
+2. Build and install it
+3. Check if it appears in the model selector
+4. Try making a completion request
+
+---
+
+## Relevant Files for Reference
+
+### How providers are registered
+- `crates/language_model/src/registry.rs` - `LanguageModelRegistry::register_provider`
+
+### How other extension proxies work
+- `crates/extension/src/extension_host_proxy.rs` - the proxy pattern
+- `crates/project/src/context_server_store/extension.rs` - context server proxy implementation
+
+### How extensions are loaded
+- `crates/extension_host/src/extension_host.rs` - `extensions_updated` method
+
+### WasmExtension call pattern
+- `crates/extension_host/src/wasm_host.rs` - `WasmExtension::call` method
+
+---
+
+## Questions for Follow-up
+
+1. **Where should configuration UI live?** The current implementation uses an empty config view. Should extension providers have configurable settings?
+
+2. **How to handle extension reload?** Currently, in-flight completions will fail if the extension is unloaded. Should we add graceful handling?
+
+3. **Should there be rate limiting?** If an extension's provider misbehaves, should Zed throttle or disable it?
+
+4. **Icon support:** The `provider_info.icon` field exists but `icon()` on the provider returns `ui::IconName::ZedAssistant`. Should we add custom icon support?
@@ -0,0 +1,1368 @@
+# Language Model Provider Extensions Plan
+
+## Executive Summary
+
+This document outlines a comprehensive plan to introduce **Language Model Provider Extensions** to Zed. This feature will allow third-party developers to create extensions that register new language model providers, enabling users to select and use custom language models in Zed's AI features (Agent, inline assist, commit message generation, etc.).
+
+## Table of Contents
+
+1. [Current Architecture Overview](#current-architecture-overview)
+2. [Goals and Requirements](#goals-and-requirements)
+3. [Proposed Architecture](#proposed-architecture)
+4. [Implementation Phases](#implementation-phases)
+5. [WIT Interface Design](#wit-interface-design)
+6. [Extension Manifest Changes](#extension-manifest-changes)
+7. [Migration Plan for Built-in Providers](#migration-plan-for-built-in-providers)
+8. [Testing Strategy](#testing-strategy)
+9. [Security Considerations](#security-considerations)
+10. [Appendix: Provider-Specific Requirements](#appendix-provider-specific-requirements)
+
+---
+
+## Current Architecture Overview
+
+### Key Components
+
+#### `language_model` crate (`crates/language_model/`)
+- **`LanguageModel` trait** (`src/language_model.rs:580-718`): Core trait defining model capabilities
+ - `id()`, `name()`, `provider_id()`, `provider_name()`
+ - `supports_images()`, `supports_tools()`, `supports_tool_choice()`
+ - `max_token_count()`, `max_output_tokens()`
+ - `count_tokens()` - async token counting
+ - `stream_completion()` - the main completion streaming method
+ - `cache_configuration()` - optional prompt caching config
+
+- **`LanguageModelProvider` trait** (`src/language_model.rs:743-764`): Provider registration
+ - `id()`, `name()`, `icon()`
+ - `default_model()`, `default_fast_model()`
+ - `provided_models()`, `recommended_models()`
+ - `is_authenticated()`, `authenticate()`
+ - `configuration_view()` - UI for provider configuration
+ - `reset_credentials()`
+
+- **`LanguageModelRegistry`** (`src/registry.rs`): Global registry for providers
+ - `register_provider()` / `unregister_provider()`
+ - Model selection and configuration
+ - Event emission for UI updates
+
+#### `language_models` crate (`crates/language_models/`)
+Contains all built-in provider implementations:
+- `provider/anthropic.rs` - Anthropic Claude models
+- `provider/cloud.rs` - Zed Cloud (proxied models)
+- `provider/google.rs` - Google Gemini models
+- `provider/open_ai.rs` - OpenAI GPT models
+- `provider/ollama.rs` - Local Ollama models
+- `provider/deepseek.rs` - DeepSeek models
+- `provider/open_router.rs` - OpenRouter aggregator
+- `provider/bedrock.rs` - AWS Bedrock
+- And more...
+
+#### Extension System (`crates/extension_host/`, `crates/extension_api/`)
+- **WIT interface** (`extension_api/wit/since_v0.6.0/`): WebAssembly Interface Types definitions
+- **WASM host** (`extension_host/src/wasm_host.rs`): Executes extension WASM modules
+- **Extension trait** (`extension/src/extension.rs`): Rust trait for extensions
+- **HTTP client** (`extension_api/src/http_client.rs`): Existing HTTP capability for extensions
+
+### Request/Response Flow
+
+```
+User Request
+ ↓
+LanguageModelRequest (crates/language_model/src/request.rs)
+ ↓
+Provider-specific conversion (e.g., into_anthropic(), into_open_ai())
+ ↓
+HTTP API call (provider-specific crate)
+ ↓
+Stream of provider-specific events
+ ↓
+Event mapping to LanguageModelCompletionEvent
+ ↓
+Consumer (Agent, Inline Assist, etc.)
+```
+
+### Key Data Structures
+
+```rust
+// Request
+pub struct LanguageModelRequest {
+ pub thread_id: Option<String>,
+ pub prompt_id: Option<String>,
+ pub intent: Option<CompletionIntent>,
+ pub mode: Option<CompletionMode>,
+ pub messages: Vec<LanguageModelRequestMessage>,
+ pub tools: Vec<LanguageModelRequestTool>,
+ pub tool_choice: Option<LanguageModelToolChoice>,
+ pub stop: Vec<String>,
+ pub temperature: Option<f32>,
+ pub thinking_allowed: bool,
+}
+
+// Completion Events
+pub enum LanguageModelCompletionEvent {
+ Queued { position: usize },
+ Started,
+ UsageUpdated { amount: usize, limit: usize },
+ ToolUseLimitReached,
+ Stop(StopReason),
+ Text(String),
+ Thinking { text: String, signature: Option<String> },
+ RedactedThinking { data: String },
+ ToolUse(LanguageModelToolUse),
+ ToolUseJsonParseError { ... },
+ StartMessage { message_id: Option<String> },
+ ReasoningDetails(serde_json::Value),
+ UsageUpdate(TokenUsage),
+}
+```
+
+---
+
+## Goals and Requirements
+
+### Primary Goals
+
+1. **Extensibility**: Allow any developer to add new LLM providers via extensions
+2. **Parity**: Extension-based providers should have feature parity with built-in providers
+3. **Performance**: Minimize overhead from WASM boundary crossings during streaming
+4. **Security**: Sandbox API key handling and network access appropriately
+5. **User Experience**: Seamless integration with existing model selectors and configuration UI
+
+### Functional Requirements
+
+1. Extensions can register one or more language model providers
+2. Extensions can define multiple models per provider
+3. Extensions handle authentication (API keys, OAuth, etc.)
+4. Extensions implement the streaming completion API
+5. Extensions can specify model capabilities (tools, images, thinking, etc.)
+6. Extensions can provide token counting logic
+7. Extensions can provide configuration UI components
+8. Extensions receive full request context for API customization
+
+### Non-Functional Requirements
+
+1. Streaming should feel as responsive as built-in providers
+2. Extension crashes should not crash Zed
+3. API keys should never be logged or exposed
+4. Extensions should be able to make arbitrary HTTP requests
+5. Settings should persist across sessions
+
+---
+
+## Proposed Architecture
+
+### High-Level Design
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ Zed Application │
+├─────────────────────────────────────────────────────────────────┤
+│ ┌─────────────────────────────────────────────────────────────┐│
+│ │ LanguageModelRegistry ││
+│ │ ┌──────────────┐ ┌──────────────┐ ┌──────────────────┐ ││
+│ │ │ Built-in │ │ Extension │ │ Extension │ ││
+│ │ │ Providers │ │ Provider A │ │ Provider B │ ││
+│ │ │ (Anthropic, │ │ (WASM) │ │ (WASM) │ ││
+│ │ │ OpenAI...) │ │ │ │ │ ││
+│ │ └──────────────┘ └──────────────┘ └──────────────────┘ ││
+│ └─────────────────────────────────────────────────────────────┘│
+│ ↑ │
+│ │ │
+│ ┌───────────────────────────┴─────────────────────────────────┐│
+│ │ ExtensionLanguageModelProvider ││
+│ │ ┌─────────────────────────────────────────────────────────┐││
+│ │ │ • Bridges WASM extension to LanguageModelProvider trait │││
+│ │ │ • Manages streaming across WASM boundary │││
+│ │ │ • Handles credential storage via credentials_provider │││
+│ │ │ • Provides configuration UI scaffolding │││
+│ │ └─────────────────────────────────────────────────────────┘││
+│ └─────────────────────────────────────────────────────────────┘│
+│ ↑ │
+│ ┌───────────────────────────┴─────────────────────────────────┐│
+│ │ WasmHost / WasmExtension ││
+│ │ • Executes WASM module ││
+│ │ • Provides WIT interface for LLM operations ││
+│ │ • HTTP client for API calls ││
+│ └─────────────────────────────────────────────────────────────┘│
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### New Components
+
+#### 1. `ExtensionLanguageModelProvider`
+
+A new struct in `extension_host` that implements `LanguageModelProvider` and wraps a WASM extension:
+
+```rust
+pub struct ExtensionLanguageModelProvider {
+ extension: WasmExtension,
+ provider_info: ExtensionLlmProviderInfo,
+ state: Entity<ExtensionLlmProviderState>,
+}
+
+struct ExtensionLlmProviderState {
+ is_authenticated: bool,
+ available_models: Vec<ExtensionLanguageModel>,
+}
+```
+
+#### 2. `ExtensionLanguageModel`
+
+Implements `LanguageModel` trait, delegating to WASM calls:
+
+```rust
+pub struct ExtensionLanguageModel {
+ extension: WasmExtension,
+ model_info: ExtensionLlmModelInfo,
+ provider_id: LanguageModelProviderId,
+}
+```
+
+#### 3. WIT Interface Extensions
+
+New WIT definitions for LLM provider functionality (see [WIT Interface Design](#wit-interface-design)).
+
+---
+
+## Implementation Phases
+
+### Phase 1: Foundation (2-3 weeks)
+
+**Goal**: Establish the core infrastructure for extension-based LLM providers.
+
+#### Tasks
+
+1. **Define WIT interface for LLM providers** (`extension_api/wit/since_v0.7.0/llm-provider.wit`)
+ - Provider metadata (id, name, icon)
+ - Model definitions (id, name, capabilities, limits)
+ - Credential management hooks
+ - Completion request/response types
+
+2. **Create `ExtensionLanguageModelProvider`** (`extension_host/src/wasm_host/llm_provider.rs`)
+ - Implement `LanguageModelProvider` trait
+ - Handle provider registration/unregistration
+ - Basic authentication state management
+
+3. **Create `ExtensionLanguageModel`** (`extension_host/src/wasm_host/llm_model.rs`)
+ - Implement `LanguageModel` trait
+ - Simple synchronous completion (non-streaming initially)
+
+4. **Update `ExtensionManifest`** (`extension/src/extension_manifest.rs`)
+ - Add `language_model_providers` field
+ - Parse provider configuration from `extension.toml`
+
+5. **Update extension loading** (`extension_host/src/extension_host.rs`)
+ - Detect LLM provider declarations in manifest
+ - Register providers with `LanguageModelRegistry`
+
+#### Deliverables
+- Extensions can register a provider that appears in model selector
+- Basic (non-streaming) completions work
+- Manual testing with a test extension
+
+### Phase 2: Streaming Support (2-3 weeks)
+
+**Goal**: Enable efficient streaming completions across the WASM boundary.
+
+#### Tasks
+
+1. **Design streaming protocol**
+ - Option A: Chunked responses via repeated WASM calls
+ - Option B: Callback-based streaming (preferred)
+ - Option C: Shared memory buffer with polling
+
+2. **Implement streaming in WIT**
+ ```wit
+ resource completion-stream {
+ next-event: func() -> result<option<completion-event>, string>;
+ }
+
+ export stream-completion: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<completion-stream, string>;
+ ```
+
+3. **Implement `http-response-stream` integration**
+ - Extensions already have access to `fetch-stream`
+ - Need to parse SSE/chunked responses in WASM
+ - Map to completion events
+
+4. **Update `ExtensionLanguageModel::stream_completion`**
+ - Bridge WASM completion-stream to Rust BoxStream
+ - Handle backpressure and cancellation
+
+5. **Performance optimization**
+ - Batch small events to reduce WASM boundary crossings
+ - Consider using shared memory for large payloads
+
+#### Deliverables
+- Streaming completions work with acceptable latency
+- Performance benchmarks vs built-in providers
+
+### Phase 3: Full Feature Parity (2-3 weeks)
+
+**Goal**: Support all advanced features that built-in providers have.
+
+#### Tasks
+
+1. **Tool/Function calling support**
+ - Add tool definitions to request
+ - Parse tool use events from response
+ - Handle tool results in follow-up requests
+
+2. **Image support**
+ - Pass image data in messages
+ - Handle base64 encoding/size limits
+
+3. **Thinking/reasoning support** (for Claude-like models)
+ - `Thinking` and `RedactedThinking` events
+ - Thought signatures for tool calls
+
+4. **Token counting**
+ - WIT interface for `count_tokens`
+ - Allow extensions to provide custom tokenizers or call API
+
+5. **Prompt caching configuration**
+ - Cache control markers in messages
+ - Cache configuration reporting
+
+6. **Rate limiting and error handling**
+ - Standard error types in WIT
+ - Retry-after headers
+ - Rate limit events
+
+#### Deliverables
+- Extension providers can use tools
+- Extension providers can process images
+- Full error handling parity
+
+### Phase 4: Credential Management & Configuration UI (1-2 weeks)
+
+**Goal**: Secure credential storage and user-friendly configuration.
+
+#### Tasks
+
+1. **Credential storage integration**
+ - Use existing `credentials_provider` crate
+ - Extensions request credentials via WIT
+   - Credentials are stored by the host; extensions retrieve them on demand via `llm-get-credential` instead of persisting them themselves (NOTE: the WIT design below does return the credential value into WASM — if a host-side-only model where WASM sees only an "is_authenticated" status is desired, the interface must change)
+
+2. **API key input flow**
+ ```wit
+ import request-credential: func(
+ credential-type: credential-type,
+ label: string,
+ placeholder: string
+ ) -> result<bool, string>;
+ ```
+
+3. **Configuration view scaffolding**
+ - Generic configuration view that works for most providers
+ - Extensions can provide additional settings via JSON schema
+ - Settings stored in extension-specific namespace
+
+4. **Environment variable support**
+ - Allow specifying env var names for API keys
+ - Read from environment on startup
+
+#### Deliverables
+- Secure API key storage
+- Configuration UI for extension providers
+- Environment variable fallback
+
+### Phase 5: Testing & Documentation (1-2 weeks)
+
+**Goal**: Comprehensive testing and developer documentation.
+
+#### Tasks
+
+1. **Integration tests**
+ - Test extension loading and registration
+ - Test streaming completions
+ - Test error handling
+ - Test credential management
+
+2. **Performance tests**
+ - Latency benchmarks
+ - Memory usage under load
+ - Comparison with built-in providers
+
+3. **Example extensions**
+ - Simple OpenAI-compatible provider
+ - Provider with custom authentication
+ - Provider with tool support
+
+4. **Documentation**
+ - Extension developer guide
+ - API reference
+ - Migration guide for custom providers
+
+#### Deliverables
+- Full test coverage
+- Published documentation
+- Example extensions in `extensions/` directory
+
+### Phase 6: Migration of Built-in Providers (Optional, Long-term)
+
+**Goal**: Prove the extension system by migrating one or more built-in providers.
+
+#### Tasks
+
+1. **Select candidate provider** (suggest: Ollama or LM Studio - simplest API)
+2. **Create extension version**
+3. **Feature parity testing**
+4. **Performance comparison**
+5. **Gradual rollout** (feature flag)
+
+---
+
+## WIT Interface Design
+
+### New File: `extension_api/wit/since_v0.7.0/llm-provider.wit`
+
+```wit
+interface llm-provider {
+ /// Information about a language model provider
+ record provider-info {
+ /// Unique identifier for the provider (e.g., "my-extension.my-provider")
+ id: string,
+ /// Display name for the provider
+ name: string,
+ /// Icon name from Zed's icon set (optional)
+ icon: option<string>,
+ }
+
+ /// Capabilities of a language model
+ record model-capabilities {
+ /// Whether the model supports image inputs
+ supports-images: bool,
+ /// Whether the model supports tool/function calling
+ supports-tools: bool,
+ /// Whether the model supports tool choice (auto/any/none)
+ supports-tool-choice-auto: bool,
+ supports-tool-choice-any: bool,
+ supports-tool-choice-none: bool,
+ /// Whether the model supports extended thinking
+ supports-thinking: bool,
+ /// The format for tool input schemas
+ tool-input-format: tool-input-format,
+ }
+
+ /// Format for tool input schemas
+ enum tool-input-format {
+ json-schema,
+ simplified,
+ }
+
+ /// Information about a specific model
+ record model-info {
+ /// Unique identifier for the model
+ id: string,
+ /// Display name for the model
+ name: string,
+ /// Maximum input token count
+ max-token-count: u64,
+ /// Maximum output tokens (optional)
+ max-output-tokens: option<u64>,
+ /// Model capabilities
+ capabilities: model-capabilities,
+ /// Whether this is the default model for the provider
+ is-default: bool,
+ /// Whether this is the default fast model
+ is-default-fast: bool,
+ }
+
+ /// A message in a completion request
+ record request-message {
+ role: message-role,
+ content: list<message-content>,
+ cache: bool,
+ }
+
+ enum message-role {
+ user,
+ assistant,
+ system,
+ }
+
+ /// Content within a message
+ variant message-content {
+ text(string),
+ image(image-data),
+ tool-use(tool-use),
+ tool-result(tool-result),
+ thinking(thinking-content),
+ redacted-thinking(string),
+ }
+
+ record image-data {
+ /// Base64-encoded image data
+ source: string,
+ /// Estimated dimensions
+ width: option<u32>,
+ height: option<u32>,
+ }
+
+ record tool-use {
+ id: string,
+ name: string,
+ input: string, // JSON string
+ thought-signature: option<string>,
+ }
+
+ record tool-result {
+ tool-use-id: string,
+ tool-name: string,
+ is-error: bool,
+ content: tool-result-content,
+ }
+
+ variant tool-result-content {
+ text(string),
+ image(image-data),
+ }
+
+ record thinking-content {
+ text: string,
+ signature: option<string>,
+ }
+
+ /// A tool definition
+ record tool-definition {
+ name: string,
+ description: string,
+ /// JSON Schema for input parameters
+ input-schema: string,
+ }
+
+ /// Tool choice preference
+ enum tool-choice {
+ auto,
+ any,
+ none,
+ }
+
+ /// A completion request
+ record completion-request {
+ messages: list<request-message>,
+ tools: list<tool-definition>,
+ tool-choice: option<tool-choice>,
+ stop-sequences: list<string>,
+ temperature: option<f32>,
+ thinking-allowed: bool,
+ /// Maximum tokens to generate
+ max-tokens: option<u64>,
+ }
+
+ /// Events emitted during completion streaming
+ variant completion-event {
+ /// Completion has started
+ started,
+ /// Text content
+ text(string),
+ /// Thinking/reasoning content
+ thinking(thinking-content),
+ /// Redacted thinking (encrypted)
+ redacted-thinking(string),
+ /// Tool use request
+ tool-use(tool-use),
+ /// Completion stopped
+ stop(stop-reason),
+ /// Token usage update
+ usage(token-usage),
+ }
+
+ enum stop-reason {
+ end-turn,
+ max-tokens,
+ tool-use,
+ }
+
+ record token-usage {
+ input-tokens: u64,
+ output-tokens: u64,
+ cache-creation-input-tokens: option<u64>,
+ cache-read-input-tokens: option<u64>,
+ }
+
+ /// A streaming completion response
+ resource completion-stream {
+ /// Get the next event from the stream.
+ /// Returns None when the stream is complete.
+ next-event: func() -> result<option<completion-event>, string>;
+ }
+
+ /// Credential types that can be requested
+ enum credential-type {
+ api-key,
+ oauth-token,
+ }
+}
+```
+
+### Updates to `extension_api/wit/since_v0.7.0/extension.wit`
+
+```wit
+world extension {
+ // ... existing imports ...
+ import llm-provider;
+
+ use llm-provider.{
+ provider-info, model-info, completion-request,
+ completion-stream, credential-type
+ };
+
+ /// Returns information about language model providers offered by this extension
+ export llm-providers: func() -> list<provider-info>;
+
+ /// Returns the models available for a provider
+ export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
+
+ /// Check if the provider is authenticated
+ export llm-provider-is-authenticated: func(provider-id: string) -> bool;
+
+ /// Attempt to authenticate the provider
+ export llm-provider-authenticate: func(provider-id: string) -> result<_, string>;
+
+ /// Reset credentials for the provider
+ export llm-provider-reset-credentials: func(provider-id: string) -> result<_, string>;
+
+ /// Count tokens for a request
+ export llm-count-tokens: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<u64, string>;
+
+ /// Stream a completion
+ export llm-stream-completion: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<completion-stream, string>;
+
+ /// Request a credential from the user
+ import llm-request-credential: func(
+ provider-id: string,
+ credential-type: credential-type,
+ label: string,
+ placeholder: string
+ ) -> result<bool, string>;
+
+ /// Get a stored credential
+ import llm-get-credential: func(provider-id: string) -> option<string>;
+
+ /// Store a credential
+ import llm-store-credential: func(provider-id: string, value: string) -> result<_, string>;
+
+ /// Delete a stored credential
+ import llm-delete-credential: func(provider-id: string) -> result<_, string>;
+}
+```
+
+---
+
+## Extension Manifest Changes
+
+### Updated `extension.toml` Schema
+
+```toml
+id = "my-llm-extension"
+name = "My LLM Provider"
+description = "Adds support for My LLM API"
+version = "1.0.0"
+schema_version = 1
+authors = ["Developer <dev@example.com>"]
+repository = "https://github.com/example/my-llm-extension"
+
+[lib]
+kind = "rust"
+version = "0.7.0"
+
+# New section for LLM providers
+[language_model_providers.my-provider]
+name = "My LLM"
+icon = "sparkle" # Optional, from Zed's icon set
+
+# Optional: Default models to show even before API connection
+[[language_model_providers.my-provider.models]]
+id = "my-model-large"
+name = "My Model Large"
+max_token_count = 200000
+max_output_tokens = 8192
+supports_images = true
+supports_tools = true
+
+[[language_model_providers.my-provider.models]]
+id = "my-model-small"
+name = "My Model Small"
+max_token_count = 100000
+max_output_tokens = 4096
+supports_images = false
+supports_tools = true
+
+# Optional: Environment variable for API key
+[language_model_providers.my-provider.auth]
+env_var = "MY_LLM_API_KEY"
+credential_label = "API Key"
+```
+
+### `ExtensionManifest` Changes
+
+```rust
+// In extension/src/extension_manifest.rs
+
+#[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)]
+pub struct LanguageModelProviderManifestEntry {
+ pub name: String,
+ #[serde(default)]
+ pub icon: Option<String>,
+ #[serde(default)]
+ pub models: Vec<LanguageModelManifestEntry>,
+ #[serde(default)]
+ pub auth: Option<LanguageModelAuthConfig>,
+}
+
+#[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)]
+pub struct LanguageModelManifestEntry {
+ pub id: String,
+ pub name: String,
+ #[serde(default)]
+ pub max_token_count: u64,
+ #[serde(default)]
+ pub max_output_tokens: Option<u64>,
+ #[serde(default)]
+ pub supports_images: bool,
+ #[serde(default)]
+ pub supports_tools: bool,
+ #[serde(default)]
+ pub supports_thinking: bool,
+}
+
+#[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)]
+pub struct LanguageModelAuthConfig {
+ pub env_var: Option<String>,
+ pub credential_label: Option<String>,
+}
+
+// Add to ExtensionManifest struct:
+pub struct ExtensionManifest {
+ // ... existing fields ...
+ #[serde(default)]
+ pub language_model_providers: BTreeMap<Arc<str>, LanguageModelProviderManifestEntry>,
+}
+```
+
+---
+
+## Migration Plan for Built-in Providers
+
+This section analyzes each built-in provider and what would be required to implement them as extensions.
+
+### Provider Comparison Matrix
+
+| Provider | API Style | Auth Method | Special Features | Migration Complexity |
+|----------|-----------|-------------|------------------|---------------------|
+| Anthropic | REST/SSE | API Key | Thinking, Caching, Tool signatures | High |
+| OpenAI | REST/SSE | API Key | Reasoning effort, Prompt caching | Medium |
+| Google | REST/SSE | API Key | Thinking, Tool signatures | High |
+| Ollama | REST/SSE | None (local) | Dynamic model discovery | Low |
+| DeepSeek | REST/SSE | API Key | Reasoning mode | Medium |
+| OpenRouter | REST/SSE | API Key | Reasoning details, Model routing | Medium |
+| LM Studio | REST/SSE | None (local) | OpenAI-compatible | Low |
+| Bedrock | AWS SDK | AWS Credentials | Multiple underlying providers | High |
+| Zed Cloud | Zed Auth | Zed Account | Proxied providers | N/A (keep built-in) |
+
+### Provider-by-Provider Analysis
+
+#### Anthropic (`provider/anthropic.rs`)
+
+**Current Implementation Highlights:**
+- Uses `anthropic` crate for API types and streaming
+- Custom event mapper (`AnthropicEventMapper`) for SSE → completion events
+- Supports thinking/reasoning with thought signatures
+- Prompt caching with cache control markers
+- Beta headers for experimental features
+
+**Extension Requirements:**
+- Full SSE parsing in WASM
+- Complex event mapping logic
+- Thinking content with signatures
+- Cache configuration reporting
+
+**Unique Challenges:**
+```rust
+// Thought signatures in tool use
+pub struct LanguageModelToolUse {
+ pub thought_signature: Option<String>, // Anthropic-specific
+}
+
+// Thinking events with signatures
+Thinking { text: String, signature: Option<String> }
+```
+
+**Migration Approach:**
+1. Port `anthropic` crate types to extension-compatible structures
+2. Implement SSE parser in extension (can use existing `fetch-stream`)
+3. Map Anthropic events to generic completion events
+4. Handle beta headers via custom HTTP headers
+
+#### OpenAI (`provider/open_ai.rs`)
+
+**Current Implementation Highlights:**
+- Uses `open_ai` crate for API types
+- Tiktoken-based token counting
+- Parallel tool calls support
+- Reasoning effort parameter (o1/o3 models)
+
+**Extension Requirements:**
+- SSE parsing (standard format)
+- Token counting (could call API or use simplified estimate)
+- Tool call aggregation across chunks
+
+**Unique Challenges:**
+```rust
+// Reasoning effort for o-series models
+pub reasoning_effort: Option<String>, // "low", "medium", "high"
+
+// Prompt cache key (preview feature)
+pub prompt_cache_key: Option<String>,
+```
+
+**Migration Approach:**
+1. Standard SSE parsing
+2. Token counting via API or tiktoken WASM port
+3. Support reasoning_effort as model-specific config
+
+#### Google/Gemini (`provider/google.rs`)
+
+**Current Implementation Highlights:**
+- Uses `google_ai` crate
+- Different API structure from OpenAI/Anthropic
+- Thinking support similar to Anthropic
+- Tool signatures in function calls
+
+**Extension Requirements:**
+- Different request/response format
+- Thinking content handling
+- Tool signature preservation
+
+**Unique Challenges:**
+```rust
+// Google uses different content structure
+enum ContentPart {
+ Text { text: String },
+ InlineData { mime_type: String, data: String },
+ FunctionCall { name: String, args: Value },
+ FunctionResponse { name: String, response: Value },
+}
+```
+
+**Migration Approach:**
+1. Implement Google-specific request building
+2. Map Google events to generic completion events
+3. Handle thinking/function call signatures
+
+#### Ollama (`provider/ollama.rs`)
+
+**Current Implementation Highlights:**
+- Local-only, no authentication needed
+- Dynamic model discovery via API
+- OpenAI-compatible chat endpoint
+- Simple streaming format
+
+**Extension Requirements:**
+- API URL configuration
+- Model list fetching
+- Basic streaming
+
+**Why This is a Good First Migration Target:**
+- No authentication complexity
+- Simple API format
+- Dynamic model discovery is isolated
+- Good test case for local provider pattern
+
+**Migration Approach:**
+1. Configuration for API URL
+2. Model discovery endpoint call
+3. OpenAI-compatible streaming
+
+#### DeepSeek (`provider/deepseek.rs`)
+
+**Current Implementation Highlights:**
+- OpenAI-compatible API with extensions
+- Reasoner model support
+- Different handling for reasoning vs standard models
+
+**Extension Requirements:**
+- API key authentication
+- Model-specific request modifications
+- Reasoning content handling
+
+**Migration Approach:**
+1. Standard OpenAI-compatible base
+2. Special handling for reasoner model
+3. Temperature disabled for reasoning
+
+#### OpenRouter (`provider/open_router.rs`)
+
+**Current Implementation Highlights:**
+- Aggregates multiple providers
+- Dynamic model fetching
+- Reasoning details preservation
+- Tool call signatures
+
+**Extension Requirements:**
+- API key authentication
+- Model list from API
+- Reasoning details in responses
+
+**Migration Approach:**
+1. Model discovery from API
+2. Standard OpenAI-compatible streaming
+3. Preserve reasoning_details in events
+
+#### LM Studio (`provider/lmstudio.rs`)
+
+**Current Implementation Highlights:**
+- Local-only, OpenAI-compatible
+- Model discovery from API
+- Simple configuration
+
+**Why This is a Good First Migration Target:**
+- No authentication
+- OpenAI-compatible (reusable streaming code)
+- Similar to Ollama
+
+#### Bedrock (`provider/bedrock.rs`)
+
+**Current Implementation Highlights:**
+- AWS SDK-based authentication
+- Multiple authentication methods (IAM, Profile, etc.)
+- Proxies to Claude, Llama, etc.
+
+**Extension Requirements:**
+- AWS credential handling (complex)
+- AWS Signature V4 signing
+- Region configuration
+
+**Why This Should Stay Built-in (Initially):**
+- AWS credential management is complex
+- SDK dependency not easily portable to WASM
+- Security implications of AWS credentials in extensions
+
+---
+
+## Testing Strategy
+
+### Unit Tests
+
+```rust
+// extension_host/src/wasm_host/llm_provider_tests.rs
+
+#[gpui::test]
+async fn test_extension_provider_registration(cx: &mut TestAppContext) {
+ // Load test extension with LLM provider
+ // Verify provider appears in registry
+ // Verify models are listed correctly
+}
+
+#[gpui::test]
+async fn test_extension_streaming_completion(cx: &mut TestAppContext) {
+ // Create mock HTTP server
+ // Load extension
+ // Send completion request
+ // Verify streaming events received correctly
+}
+
+#[gpui::test]
+async fn test_extension_tool_calling(cx: &mut TestAppContext) {
+ // Test tool definitions are passed correctly
+ // Test tool use events are parsed
+ // Test tool results can be sent back
+}
+
+#[gpui::test]
+async fn test_extension_credential_management(cx: &mut TestAppContext) {
+ // Test credential storage
+ // Test credential retrieval
+ // Test authentication state
+}
+
+#[gpui::test]
+async fn test_extension_error_handling(cx: &mut TestAppContext) {
+ // Test API errors are propagated correctly
+ // Test rate limiting is handled
+ // Test network errors are handled
+}
+```
+
+### Integration Tests
+
+```rust
+// crates/extension_host/src/extension_store_test.rs (additions)
+
+#[gpui::test]
+async fn test_llm_extension_lifecycle(cx: &mut TestAppContext) {
+ // Install extension with LLM provider
+ // Verify provider registered
+ // Configure credentials
+ // Make completion request
+ // Uninstall extension
+ // Verify provider unregistered
+}
+```
+
+### Manual Testing Checklist
+
+1. **Provider Discovery**
+ - [ ] Extension provider appears in model selector
+ - [ ] Provider icon displays correctly
+ - [ ] Models list correctly
+
+2. **Authentication**
+ - [ ] API key prompt appears when not authenticated
+ - [ ] API key is stored securely
+ - [ ] Environment variable fallback works
+ - [ ] "Reset credentials" works
+
+3. **Completions**
+ - [ ] Basic text completion works
+ - [ ] Streaming is smooth (no jank)
+ - [ ] Long responses complete successfully
+ - [ ] Cancellation works
+
+4. **Advanced Features**
+ - [ ] Tool calling works (Agent panel)
+ - [ ] Image inputs work (if supported)
+ - [ ] Thinking/reasoning displays correctly
+
+5. **Error Handling**
+ - [ ] Invalid API key shows error
+ - [ ] Rate limiting shows appropriate message
+ - [ ] Network errors are handled gracefully
+
+6. **Performance**
+ - [ ] First token latency acceptable (<500ms overhead)
+ - [ ] Memory usage reasonable
+ - [ ] No memory leaks on repeated requests
+
+---
+
+## Security Considerations
+
+### Credential Handling
+
+1. **Never expose raw credentials to WASM**
+ - Extensions request credentials via import function
+ - Zed stores credentials in secure storage (keychain/credential manager)
+ - Extensions receive only "authenticated: true/false" status
+
+2. **Credential scope isolation**
+ - Each extension has its own credential namespace
+ - Extensions cannot access other extensions' credentials
+ - Provider ID is prefixed with extension ID
+
+3. **Audit logging**
+ - Log when credentials are accessed (not the values)
+ - Log when credentials are modified
+
+### Network Access
+
+1. **HTTP request validation**
+ - Extensions already have HTTP access via `fetch` / `fetch-stream`
+ - Consider domain allowlisting for LLM providers
+ - Log outbound requests for debugging
+
+2. **Request/Response inspection**
+ - API keys in headers should be redacted in logs
+ - Response bodies may contain sensitive data
+
+### Extension Sandbox
+
+1. **WASM isolation**
+ - Extensions run in WASM sandbox
+ - Cannot access filesystem outside work directory
+ - Cannot access other extensions' data
+
+2. **Resource limits**
+ - Memory limits per extension
+ - CPU time limits (epoch-based interruption already exists)
+ - Concurrent request limits
+
+### Capability Requirements
+
+```toml
+# Extensions with LLM providers should declare:
+[[capabilities]]
+kind = "network:http"
+domains = ["api.example.com"] # Optional domain restriction
+
+[[capabilities]]
+kind = "credential:store"
+```
+
+---
+
+## Appendix: Provider-Specific Requirements
+
+### A. Anthropic Implementation Details
+
+**Request Format:**
+```json
+{
+ "model": "claude-sonnet-4-20250514",
+ "max_tokens": 8192,
+ "messages": [
+ {"role": "user", "content": [{"type": "text", "text": "Hello"}]}
+ ],
+ "system": [{"type": "text", "text": "You are helpful"}],
+ "tools": [...],
+ "thinking": {"type": "enabled", "budget_tokens": 10000}
+}
+```
+
+**SSE Events:**
+- `message_start` - Contains message ID, model, usage
+- `content_block_start` - Starts text/tool_use/thinking block
+- `content_block_delta` - Incremental content (text_delta, input_json_delta, thinking_delta)
+- `content_block_stop` - Block complete
+- `message_delta` - Stop reason, final usage
+- `message_stop` - End of message
+
+**Special Considerations:**
+- Beta headers for thinking: `anthropic-beta: interleaved-thinking-2025-05-14`
+- Cache control markers in messages
+- Thought signatures on tool uses
+
+### B. OpenAI Implementation Details
+
+**Request Format:**
+```json
+{
+ "model": "gpt-4o",
+ "messages": [
+ {"role": "system", "content": "You are helpful"},
+ {"role": "user", "content": "Hello"}
+ ],
+ "stream": true,
+ "tools": [...],
+ "max_completion_tokens": 4096
+}
+```
+
+**SSE Events:**
+```
+data: {"choices":[{"delta":{"content":"Hello"}}]}
+data: {"choices":[{"delta":{"tool_calls":[...]}}]}
+data: [DONE]
+```
+
+**Special Considerations:**
+- `reasoning_effort` for o-series models
+- `parallel_tool_calls` option
+- Token counting via tiktoken
+
+### C. Google/Gemini Implementation Details
+
+**Request Format:**
+```json
+{
+ "contents": [
+ {"role": "user", "parts": [{"text": "Hello"}]}
+ ],
+ "generationConfig": {
+ "maxOutputTokens": 8192,
+ "temperature": 0.7
+ },
+ "tools": [...]
+}
+```
+
+**Response Format:**
+```json
+{
+ "candidates": [{
+ "content": {
+ "parts": [
+ {"text": "Response"},
+ {"functionCall": {"name": "...", "args": {...}}}
+ ]
+ }
+ }]
+}
+```
+
+**Special Considerations:**
+- Different default streaming format: the response is streamed as a JSON array of chunks rather than SSE (SSE is available by adding the `alt=sse` query parameter)
+- Tool signatures in function calls
+- Thinking support similar to Anthropic
+
+### D. OpenAI-Compatible Providers (Ollama, LM Studio, DeepSeek)
+
+These providers can share common implementation:
+
+**Shared Code:**
+```rust
+// In extension
+fn stream_openai_compatible(
+ api_url: &str,
+ api_key: Option<&str>,
+ request: CompletionRequest,
+) -> Result<CompletionStream, String> {
+ let request_body = build_openai_request(request);
+ let stream = http_client::fetch_stream(HttpRequest {
+ method: HttpMethod::Post,
+ url: format!("{}/v1/chat/completions", api_url),
+ headers: build_headers(api_key),
+        body: Some(serde_json::to_vec(&request_body).map_err(|e| e.to_string())?),
+ redirect_policy: RedirectPolicy::NoFollow,
+ })?;
+
+ Ok(OpenAiStreamParser::new(stream))
+}
+```
+
+### E. Example Extension: Simple OpenAI-Compatible Provider
+
+```rust
+// src/my_provider.rs
+use zed_extension_api::{self as zed, Result};
+use zed_extension_api::http_client::{HttpMethod, HttpRequest, RedirectPolicy};
+
+struct MyLlmExtension {
+ api_key: Option<String>,
+}
+
+impl zed::Extension for MyLlmExtension {
+ fn new() -> Self {
+ Self { api_key: None }
+ }
+
+ fn llm_providers(&self) -> Vec<zed::LlmProviderInfo> {
+ vec![zed::LlmProviderInfo {
+ id: "my-provider".into(),
+ name: "My LLM Provider".into(),
+ icon: Some("sparkle".into()),
+ }]
+ }
+
+ fn llm_provider_models(&self, provider_id: &str) -> Result<Vec<zed::LlmModelInfo>> {
+ Ok(vec![
+ zed::LlmModelInfo {
+ id: "my-model".into(),
+ name: "My Model".into(),
+ max_token_count: 128000,
+ max_output_tokens: Some(4096),
+ capabilities: zed::LlmModelCapabilities {
+ supports_images: true,
+ supports_tools: true,
+ ..Default::default()
+ },
+ is_default: true,
+ is_default_fast: false,
+ }
+ ])
+ }
+
+ fn llm_provider_is_authenticated(&self, _provider_id: &str) -> bool {
+ self.api_key.is_some() || std::env::var("MY_API_KEY").is_ok()
+ }
+
+ fn llm_provider_authenticate(&mut self, provider_id: &str) -> Result<()> {
+ if let Some(key) = zed::llm_get_credential(provider_id)? {
+ self.api_key = Some(key);
+ return Ok(());
+ }
+
+ if zed::llm_request_credential(
+ provider_id,
+ zed::CredentialType::ApiKey,
+ "API Key",
+ "Enter your API key",
+ )? {
+ self.api_key = zed::llm_get_credential(provider_id)?;
+ }
+
+ Ok(())
+ }
+
+ fn llm_stream_completion(
+ &self,
+ provider_id: &str,
+ model_id: &str,
+ request: zed::LlmCompletionRequest,
+ ) -> Result<zed::LlmCompletionStream> {
+        let api_key = self.api_key.clone()
+            .or_else(|| std::env::var("MY_API_KEY").ok())
+            .ok_or("Not authenticated")?;
+
+ let body = serde_json::json!({
+ "model": model_id,
+ "messages": self.convert_messages(&request.messages),
+ "stream": true,
+ "max_tokens": request.max_tokens.unwrap_or(4096),
+ });
+
+ let stream = HttpRequest::builder()
+ .method(HttpMethod::Post)
+ .url("https://api.my-provider.com/v1/chat/completions")
+ .header("Authorization", format!("Bearer {}", api_key))
+ .header("Content-Type", "application/json")
+            .body(serde_json::to_vec(&body).map_err(|e| e.to_string())?)
+ .build()?
+ .fetch_stream()?;
+
+ Ok(zed::LlmCompletionStream::new(OpenAiStreamParser::new(stream)))
+ }
+}
+
+zed::register_extension!(MyLlmExtension);
+```
+
+---
+
+## Timeline Summary
+
+| Phase | Duration | Key Deliverables |
+|-------|----------|------------------|
+| 1. Foundation | 2-3 weeks | WIT interface, basic provider registration |
+| 2. Streaming | 2-3 weeks | Efficient streaming across WASM boundary |
+| 3. Full Features | 2-3 weeks | Tools, images, thinking support |
+| 4. Credentials & UI | 1-2 weeks | Secure credentials, configuration UI |
+| 5. Testing & Docs | 1-2 weeks | Tests, documentation, examples |
+| 6. Migration (optional) | Ongoing | Migrate built-in providers |
+
+**Total estimated time: 8-13 weeks**
+
+---
+
+## Open Questions
+
+1. **Streaming efficiency**: Is callback-based streaming feasible in WASM, or should we use polling?
+
+2. **Token counting**: Should we require extensions to implement token counting, or provide a fallback estimation?
+
+3. **Configuration UI**: Should extensions be able to provide custom UI components, or just JSON schema-driven forms?
+
+4. **Provider priorities**: Should extension providers appear before or after built-in providers in the selector?
+
+5. **Backward compatibility**: How do we handle extensions built against older WIT versions when adding new LLM features?
+
+6. **Rate limiting**: Should the host help with rate limiting, or leave it entirely to extensions?
+
+---
+
+## Conclusion
+
+This plan provides a comprehensive roadmap for implementing Language Model Provider Extensions in Zed. The phased approach allows for incremental delivery of value while building toward full feature parity with built-in providers.
+
+The key architectural decisions are:
+1. **WIT-based interface** for WASM interop, consistent with existing extension patterns
+2. **Streaming via resources** to minimize WASM boundary crossing overhead
+3. **Host-managed credentials** for security
+4. **Manifest-based discovery** for static model information
+
+The migration analysis shows that simpler providers (Ollama, LM Studio) can be migrated first as proof of concept, while more complex providers (Anthropic, Bedrock) may remain built-in initially.
@@ -22,4 +22,4 @@ args = ["hello from a child process!"]
[[capabilities]]
kind = "process:exec"
command = "cmd"
-args = ["/C", "echo", "hello from a child process!"]
+args = ["/C", "echo", "hello from a child process!"]