Delete llm-provider.wit

Created by Richard Feldman

Change summary

crates/extension_api/wit/since_v0.8.0/llm-provider.wit | 362 ------------
1 file changed, 362 deletions(-)

Detailed changes

crates/extension_api/wit/since_v0.8.0/llm-provider.wit

@@ -1,362 +0,0 @@
-interface llm-provider {
-    use http-client.{http-request, http-response-with-status};
-
-    /// Information about a language model provider.
-    record provider-info {
-        /// Unique identifier for the provider (e.g. "my-extension.my-provider").
-        id: string,
-        /// Display name for the provider.
-        name: string,
-        /// Path to an SVG icon file relative to the extension root (e.g. "icons/provider.svg").
-        icon: option<string>,
-    }
-
-    /// Capabilities of a language model.
-    record model-capabilities {
-        /// Whether the model supports image inputs.
-        supports-images: bool,
-        /// Whether the model supports tool/function calling.
-        supports-tools: bool,
-        /// Whether the model supports the "auto" tool choice.
-        supports-tool-choice-auto: bool,
-        /// Whether the model supports the "any" tool choice.
-        supports-tool-choice-any: bool,
-        /// Whether the model supports the "none" tool choice.
-        supports-tool-choice-none: bool,
-        /// Whether the model supports extended thinking/reasoning.
-        supports-thinking: bool,
-        /// The format for tool input schemas.
-        tool-input-format: tool-input-format,
-    }
-
-    /// Format for tool input schemas.
-    enum tool-input-format {
-        /// Standard JSON Schema format.
-        json-schema,
-        /// A subset of JSON Schema supported by Google AI.
-        /// See https://ai.google.dev/api/caching#Schema
-        json-schema-subset,
-        /// Simplified schema format for certain providers.
-        simplified,
-    }
-
-    /// Information about a specific model.
-    record model-info {
-        /// Unique identifier for the model.
-        id: string,
-        /// Display name for the model.
-        name: string,
-        /// Maximum input token count.
-        max-token-count: u64,
-        /// Maximum output tokens (optional).
-        max-output-tokens: option<u64>,
-        /// Model capabilities.
-        capabilities: model-capabilities,
-        /// Whether this is the default model for the provider.
-        is-default: bool,
-        /// Whether this is the default fast model.
-        is-default-fast: bool,
-    }
-
-    /// The role of a message participant.
-    enum message-role {
-        /// User message.
-        user,
-        /// Assistant message.
-        assistant,
-        /// System message.
-        system,
-    }
-
-    /// A message in a completion request.
-    record request-message {
-        /// The role of the message sender.
-        role: message-role,
-        /// The content of the message.
-        content: list<message-content>,
-        /// Whether to cache this message for prompt caching.
-        cache: bool,
-    }
-
-    /// Content within a message.
-    variant message-content {
-        /// Plain text content.
-        text(string),
-        /// Image content.
-        image(image-data),
-        /// A tool use request from the assistant.
-        tool-use(tool-use),
-        /// A tool result from the user.
-        tool-result(tool-result),
-        /// Thinking/reasoning content.
-        thinking(thinking-content),
-        /// Redacted/encrypted thinking content.
-        redacted-thinking(string),
-    }
-
-    /// Image data for vision models.
-    record image-data {
-        /// Base64-encoded image data.
-        source: string,
-        /// Image width in pixels (optional).
-        width: option<u32>,
-        /// Image height in pixels (optional).
-        height: option<u32>,
-    }
-
-    /// A tool use request from the model.
-    record tool-use {
-        /// Unique identifier for this tool use.
-        id: string,
-        /// The name of the tool being used.
-        name: string,
-        /// JSON string of the tool input arguments.
-        input: string,
-        /// Whether the input JSON is complete (false while streaming, true when done).
-        is-input-complete: bool,
-        /// Thought signature for providers that support it (e.g., Anthropic).
-        thought-signature: option<string>,
-    }
-
-    /// A tool result to send back to the model.
-    record tool-result {
-        /// The ID of the tool use this is a result for.
-        tool-use-id: string,
-        /// The name of the tool.
-        tool-name: string,
-        /// Whether this result represents an error.
-        is-error: bool,
-        /// The content of the result.
-        content: tool-result-content,
-    }
-
-    /// Content of a tool result.
-    variant tool-result-content {
-        /// Text result.
-        text(string),
-        /// Image result.
-        image(image-data),
-    }
-
-    /// Thinking/reasoning content from models that support extended thinking.
-    record thinking-content {
-        /// The thinking text.
-        text: string,
-        /// Signature for the thinking block (provider-specific).
-        signature: option<string>,
-    }
-
-    /// A tool definition for function calling.
-    record tool-definition {
-        /// The name of the tool.
-        name: string,
-        /// Description of what the tool does.
-        description: string,
-        /// JSON Schema for input parameters.
-        input-schema: string,
-    }
-
-    /// Tool choice preference for the model.
-    enum tool-choice {
-        /// Let the model decide whether to use tools.
-        auto,
-        /// Force the model to use at least one tool.
-        any,
-        /// Prevent the model from using tools.
-        none,
-    }
-
-    /// A completion request to send to the model.
-    record completion-request {
-        /// The messages in the conversation.
-        messages: list<request-message>,
-        /// Available tools for the model to use.
-        tools: list<tool-definition>,
-        /// Tool choice preference.
-        tool-choice: option<tool-choice>,
-        /// Stop sequences to end generation.
-        stop-sequences: list<string>,
-        /// Temperature for sampling (0.0-1.0).
-        temperature: option<f32>,
-        /// Whether thinking/reasoning is allowed.
-        thinking-allowed: bool,
-        /// Maximum tokens to generate.
-        max-tokens: option<u64>,
-    }
-
-    /// Events emitted during completion streaming.
-    variant completion-event {
-        /// Completion has started.
-        started,
-        /// Text content chunk.
-        text(string),
-        /// Thinking/reasoning content chunk.
-        thinking(thinking-content),
-        /// Redacted thinking (encrypted) chunk.
-        redacted-thinking(string),
-        /// Tool use request from the model.
-        tool-use(tool-use),
-        /// JSON parse error when parsing tool input.
-        tool-use-json-parse-error(tool-use-json-parse-error),
-        /// Completion stopped.
-        stop(stop-reason),
-        /// Token usage update.
-        usage(token-usage),
-        /// Reasoning details (provider-specific JSON).
-        reasoning-details(string),
-    }
-
-    /// Error information when tool use JSON parsing fails.
-    record tool-use-json-parse-error {
-        /// The tool use ID.
-        id: string,
-        /// The tool name.
-        tool-name: string,
-        /// The raw input that failed to parse.
-        raw-input: string,
-        /// The parse error message.
-        error: string,
-    }
-
-    /// Reason the completion stopped.
-    enum stop-reason {
-        /// The model finished generating.
-        end-turn,
-        /// Maximum tokens reached.
-        max-tokens,
-        /// The model wants to use a tool.
-        tool-use,
-        /// The model refused to respond.
-        refusal,
-    }
-
-    /// Token usage statistics.
-    record token-usage {
-        /// Number of input tokens used.
-        input-tokens: u64,
-        /// Number of output tokens generated.
-        output-tokens: u64,
-        /// Tokens used for cache creation (if supported).
-        cache-creation-input-tokens: option<u64>,
-        /// Tokens read from cache (if supported).
-        cache-read-input-tokens: option<u64>,
-    }
-
-    /// Cache configuration for prompt caching.
-    record cache-configuration {
-        /// Maximum number of cache anchors.
-        max-cache-anchors: u32,
-        /// Whether caching should be applied to tool definitions.
-        should-cache-tool-definitions: bool,
-        /// Minimum token count for a message to be cached.
-        min-total-token-count: u64,
-    }
-
-    /// Configuration for starting an OAuth web authentication flow.
-    record oauth-web-auth-config {
-        /// The URL to open in the user's browser to start authentication.
-        /// This should include client_id, redirect_uri, scope, state, etc.
-        /// Use `{port}` as a placeholder in the URL - it will be replaced with
-        /// the actual localhost port before opening the browser.
-        /// Example: "https://example.com/oauth?redirect_uri=http://127.0.0.1:{port}/callback"
-        auth-url: string,
-        /// The path to listen on for the OAuth callback (e.g., "/callback").
-        /// A localhost server will be started to receive the redirect.
-        callback-path: string,
-        /// Timeout in seconds to wait for the callback (default: 300 = 5 minutes).
-        timeout-secs: option<u32>,
-    }
-
-    /// Result of an OAuth web authentication flow.
-    record oauth-web-auth-result {
-        /// The full callback URL that was received, including query parameters.
-        /// The extension is responsible for parsing the code, state, etc.
-        callback-url: string,
-        /// The port that was used for the localhost callback server.
-        port: u32,
-    }
-
-    /// Get a stored credential for this provider.
-    get-credential: func(provider-id: string) -> option<string>;
-
-    /// Store a credential for this provider.
-    store-credential: func(provider-id: string, value: string) -> result<_, string>;
-
-    /// Delete a stored credential for this provider.
-    delete-credential: func(provider-id: string) -> result<_, string>;
-
-    /// Read an environment variable.
-    get-env-var: func(name: string) -> option<string>;
-
-    /// Start an OAuth web authentication flow.
-    ///
-    /// This will:
-    /// 1. Start a localhost server to receive the OAuth callback
-    /// 2. Open the auth URL in the user's default browser
-    /// 3. Wait for the callback (up to the timeout)
-    /// 4. Return the callback URL with query parameters
-    ///
-    /// The extension is responsible for:
-    /// - Constructing the auth URL with client_id, redirect_uri, scope, state, etc.
-    /// - Parsing the callback URL to extract the authorization code
-    /// - Exchanging the code for tokens using fetch-fallible from http-client
-    oauth-start-web-auth: func(config: oauth-web-auth-config) -> result<oauth-web-auth-result, string>;
-
-    /// Make an HTTP request for OAuth token exchange.
-    ///
-    /// This is a convenience wrapper around http-client's fetch-fallible for OAuth flows.
-    /// Unlike the standard fetch, this does not treat non-2xx responses as errors,
-    /// allowing proper handling of OAuth error responses.
-    oauth-send-http-request: func(request: http-request) -> result<http-response-with-status, string>;
-
-    /// Open a URL in the user's default browser.
-    ///
-    /// Useful for OAuth flows that need to open a browser but handle the
-    /// callback differently (e.g., polling-based flows).
-    oauth-open-browser: func(url: string) -> result<_, string>;
-
-    /// Provider settings from user configuration.
-    /// Extensions can use this to allow custom API URLs, custom models, etc.
-    record provider-settings {
-        /// Custom API URL override (if configured by the user).
-        api-url: option<string>,
-        /// Custom models configured by the user.
-        available-models: list<custom-model-config>,
-    }
-
-    /// Configuration for a custom model defined by the user.
-    record custom-model-config {
-        /// The model's API identifier.
-        name: string,
-        /// Display name for the UI.
-        display-name: option<string>,
-        /// Maximum input token count.
-        max-tokens: u64,
-        /// Maximum output tokens (optional).
-        max-output-tokens: option<u64>,
-        /// Thinking budget for models that support extended thinking (None = auto).
-        thinking-budget: option<u32>,
-    }
-
-    /// Get provider-specific settings configured by the user.
-    /// Returns settings like custom API URLs and custom model configurations.
-    get-provider-settings: func(provider-id: string) -> option<provider-settings>;
-
-    /// Information needed to display the device flow prompt modal to the user.
-    record device-flow-prompt-info {
-        /// The user code to display (e.g., "ABC-123").
-        user-code: string,
-        /// The URL the user needs to visit to authorize (for the "Connect" button).
-        verification-url: string,
-        /// The headline text for the modal (e.g., "Use GitHub Copilot in Zed.").
-        headline: string,
-        /// A description to show below the headline (e.g., "Using Copilot requires an active subscription on GitHub.").
-        description: string,
-        /// Label for the connect button (e.g., "Connect to GitHub").
-        connect-button-label: string,
-        /// Success headline shown when authorization completes.
-        success-headline: string,
-        /// Success message shown when authorization completes.
-        success-message: string,
-    }
-}
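
For context on what the deleted interface modeled, here is a minimal Rust sketch of the request data model. These are hand-written stand-ins, not the generated guest bindings; the type and field names simply transliterate the kebab-case WIT records above, and several variants are omitted.

```rust
// Hand-written mirror of the WIT request records, for illustration only.
// The real guest bindings would be generated from the WIT file.

#[derive(Debug, Clone, Copy)]
enum MessageRole {
    User,
    Assistant,
    System,
}

#[derive(Debug, Clone)]
enum MessageContent {
    Text(String),
    // image, tool-use, tool-result, and thinking variants omitted for brevity
}

#[derive(Debug, Clone)]
struct RequestMessage {
    role: MessageRole,
    content: Vec<MessageContent>,
    cache: bool,
}

#[derive(Debug, Clone)]
struct CompletionRequest {
    messages: Vec<RequestMessage>,
    stop_sequences: Vec<String>,
    temperature: Option<f32>,
    thinking_allowed: bool,
    max_tokens: Option<u64>,
    // tools and tool-choice omitted for brevity
}

fn main() {
    // A one-turn request equivalent to what a provider extension would
    // have received for a plain chat completion.
    let request = CompletionRequest {
        messages: vec![RequestMessage {
            role: MessageRole::User,
            content: vec![MessageContent::Text("Hello!".to_string())],
            cache: false,
        }],
        stop_sequences: Vec::new(),
        temperature: Some(0.7),
        thinking_allowed: false,
        max_tokens: Some(1024),
    };
    println!("{request:?}");
}
```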
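The `oauth-start-web-auth` contract puts URL construction and callback parsing on the extension side: the host substitutes `{port}`, opens the browser, and hands back the raw callback URL. Below is a hedged sketch of the extension's half of that flow; the helper names, client id, and URLs are made up for illustration.

```rust
// Sketch of the extension-side half of the oauth-start-web-auth flow.
// `MY_CLIENT_ID` and the example URLs are placeholders, not real values.

const MY_CLIENT_ID: &str = "example-client-id";

/// Build the auth URL the host will open. `{port}` is left literal so the
/// host can substitute the actual localhost callback port.
fn build_auth_url(state: &str) -> String {
    format!(
        "https://example.com/oauth/authorize?client_id={MY_CLIENT_ID}\
         &redirect_uri=http://127.0.0.1:{{port}}/callback\
         &response_type=code&scope=model.read&state={state}"
    )
}

/// Extract a query parameter from the callback URL returned by the host,
/// e.g. "http://127.0.0.1:49152/callback?code=abc123&state=xyz".
fn query_param(callback_url: &str, key: &str) -> Option<String> {
    let query = callback_url.split_once('?')?.1;
    query
        .split('&')
        .filter_map(|pair| pair.split_once('='))
        .find(|(k, _)| *k == key)
        .map(|(_, v)| v.to_string())
}

fn main() {
    let state = "xyz";
    println!("{}", build_auth_url(state));

    let callback = "http://127.0.0.1:49152/callback?code=abc123&state=xyz";
    let code = query_param(callback, "code").expect("callback missing code");
    assert_eq!(query_param(callback, "state").as_deref(), Some(state));
    // The extension would then exchange `code` for tokens via
    // oauth-send-http-request and persist them with store-credential.
    println!("authorization code: {code}");
}
```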
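The `completion-event` variant defined the streaming protocol a provider implementation emitted. A hedged sketch of folding such a stream into a final message, using local enum stand-ins rather than the generated bindings and covering only a subset of the variants:

```rust
// Local stand-ins for completion-event and stop-reason; the real types
// would come from the WIT-generated bindings.

#[derive(Debug, Clone, Copy)]
enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

#[derive(Debug, Clone)]
enum CompletionEvent {
    Started,
    Text(String),
    Stop(StopReason),
    Usage { input_tokens: u64, output_tokens: u64 },
    // thinking, tool-use, and error variants omitted for brevity
}

/// Fold a stream of events into the final assistant text, the stop reason,
/// and the last reported token usage.
fn collect(
    events: impl IntoIterator<Item = CompletionEvent>,
) -> (String, Option<StopReason>, (u64, u64)) {
    let mut text = String::new();
    let mut stop = None;
    let mut usage = (0, 0);
    for event in events {
        match event {
            CompletionEvent::Started => {}
            CompletionEvent::Text(chunk) => text.push_str(&chunk),
            CompletionEvent::Stop(reason) => stop = Some(reason),
            CompletionEvent::Usage { input_tokens, output_tokens } => {
                usage = (input_tokens, output_tokens);
            }
        }
    }
    (text, stop, usage)
}

fn main() {
    let events = vec![
        CompletionEvent::Started,
        CompletionEvent::Text("Hello".into()),
        CompletionEvent::Text(", world!".into()),
        CompletionEvent::Usage { input_tokens: 12, output_tokens: 4 },
        CompletionEvent::Stop(StopReason::EndTurn),
    ];
    let (text, stop, usage) = collect(events);
    println!("{text} (stop: {stop:?}, usage: {usage:?})");
}
```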