interface llm-provider {
    use http-client.{http-request, http-response-with-status};

    /// Information about a language model provider.
    record provider-info {
        /// Unique identifier for the provider (e.g. "my-extension.my-provider").
        id: string,
        /// Display name for the provider.
        name: string,
        /// Path to an SVG icon file relative to the extension root (e.g. "icons/provider.svg").
        icon: option<string>,
    }

    /// Capabilities of a language model.
    record model-capabilities {
        /// Whether the model supports image inputs.
        supports-images: bool,
        /// Whether the model supports tool/function calling.
        supports-tools: bool,
        /// Whether the model supports the "auto" tool choice.
        supports-tool-choice-auto: bool,
        /// Whether the model supports the "any" tool choice.
        supports-tool-choice-any: bool,
        /// Whether the model supports the "none" tool choice.
        supports-tool-choice-none: bool,
        /// Whether the model supports extended thinking/reasoning.
        supports-thinking: bool,
        /// The format for tool input schemas.
        tool-input-format: tool-input-format,
    }

    /// Format for tool input schemas.
    enum tool-input-format {
        /// Standard JSON Schema format.
        json-schema,
        /// Simplified schema format for certain providers.
        simplified,
    }

    /// Information about a specific model.
    record model-info {
        /// Unique identifier for the model.
        id: string,
        /// Display name for the model.
        name: string,
        /// Maximum input token count.
        max-token-count: u64,
        /// Maximum output tokens (optional).
        max-output-tokens: option<u64>,
        /// Model capabilities.
        capabilities: model-capabilities,
        /// Whether this is the default model for the provider.
        is-default: bool,
        /// Whether this is the default fast model.
        is-default-fast: bool,
    }

    /// The role of a message participant.
    enum message-role {
        /// User message.
        user,
        /// Assistant message.
        assistant,
        /// System message.
        system,
    }

    /// A message in a completion request.
    record request-message {
        /// The role of the message sender.
        role: message-role,
        /// The content of the message.
        content: list<message-content>,
        /// Whether to cache this message for prompt caching.
        cache: bool,
    }

    /// Content within a message.
    variant message-content {
        /// Plain text content.
        text(string),
        /// Image content.
        image(image-data),
        /// A tool use request from the assistant.
        tool-use(tool-use),
        /// A tool result from the user.
        tool-result(tool-result),
        /// Thinking/reasoning content.
        thinking(thinking-content),
        /// Redacted/encrypted thinking content.
        redacted-thinking(string),
    }

    /// Image data for vision models.
    record image-data {
        /// Base64-encoded image data.
        source: string,
        /// Image width in pixels (optional).
        width: option<u32>,
        /// Image height in pixels (optional).
        height: option<u32>,
    }

    /// A tool use request from the model.
    record tool-use {
        /// Unique identifier for this tool use.
        id: string,
        /// The name of the tool being used.
        name: string,
        /// JSON string of the tool input arguments.
        input: string,
        /// Whether the input JSON is complete (false while streaming, true when done).
        is-input-complete: bool,
        /// Thought signature for providers that support it (e.g., Anthropic).
        thought-signature: option<string>,
    }

    /// A tool result to send back to the model.
    record tool-result {
        /// The ID of the tool use this is a result for.
        tool-use-id: string,
        /// The name of the tool.
        tool-name: string,
        /// Whether this result represents an error.
        is-error: bool,
        /// The content of the result.
        content: tool-result-content,
    }

    /// Content of a tool result.
    variant tool-result-content {
        /// Text result.
        text(string),
        /// Image result.
        image(image-data),
    }

    /// Thinking/reasoning content from models that support extended thinking.
    record thinking-content {
        /// The thinking text.
        text: string,
        /// Signature for the thinking block (provider-specific).
        signature: option<string>,
    }

    /// A tool definition for function calling.
    record tool-definition {
        /// The name of the tool.
        name: string,
        /// Description of what the tool does.
        description: string,
        /// JSON Schema for input parameters.
        input-schema: string,
    }

    /// Tool choice preference for the model.
    enum tool-choice {
        /// Let the model decide whether to use tools.
        auto,
        /// Force the model to use at least one tool.
        any,
        /// Prevent the model from using tools.
        none,
    }

    /// A completion request to send to the model.
    record completion-request {
        /// The messages in the conversation.
        messages: list<request-message>,
        /// Available tools for the model to use.
        tools: list<tool-definition>,
        /// Tool choice preference.
        tool-choice: option<tool-choice>,
        /// Stop sequences to end generation.
        stop-sequences: list<string>,
        /// Temperature for sampling (0.0-1.0).
        temperature: option<f32>,
        /// Whether thinking/reasoning is allowed.
        thinking-allowed: bool,
        /// Maximum tokens to generate.
        max-tokens: option<u64>,
    }

    /// Events emitted during completion streaming.
    variant completion-event {
        /// Completion has started.
        started,
        /// Text content chunk.
        text(string),
        /// Thinking/reasoning content chunk.
        thinking(thinking-content),
        /// Redacted thinking (encrypted) chunk.
        redacted-thinking(string),
        /// Tool use request from the model.
        tool-use(tool-use),
        /// JSON parse error when parsing tool input.
        tool-use-json-parse-error(tool-use-json-parse-error),
        /// Completion stopped.
        stop(stop-reason),
        /// Token usage update.
        usage(token-usage),
        /// Reasoning details (provider-specific JSON).
        reasoning-details(string),
    }

    /// Error information when tool use JSON parsing fails.
    record tool-use-json-parse-error {
        /// The tool use ID.
        id: string,
        /// The tool name.
        tool-name: string,
        /// The raw input that failed to parse.
        raw-input: string,
        /// The parse error message.
        error: string,
    }

    /// Reason the completion stopped.
    enum stop-reason {
        /// The model finished generating.
        end-turn,
        /// Maximum tokens reached.
        max-tokens,
        /// The model wants to use a tool.
        tool-use,
        /// The model refused to respond.
        refusal,
    }

    /// Token usage statistics.
    record token-usage {
        /// Number of input tokens used.
        input-tokens: u64,
        /// Number of output tokens generated.
        output-tokens: u64,
        /// Tokens used for cache creation (if supported).
        cache-creation-input-tokens: option<u64>,
        /// Tokens read from cache (if supported).
        cache-read-input-tokens: option<u64>,
    }

    /// Cache configuration for prompt caching.
    record cache-configuration {
        /// Maximum number of cache anchors.
        max-cache-anchors: u32,
        /// Whether caching should be applied to tool definitions.
        should-cache-tool-definitions: bool,
        /// Minimum token count for a message to be cached.
        min-total-token-count: u64,
    }

    /// Configuration for starting an OAuth web authentication flow.
    record oauth-web-auth-config {
        /// The URL to open in the user's browser to start authentication.
        /// This should include client_id, redirect_uri, scope, state, etc.
        /// Use `{port}` as a placeholder in the URL - it will be replaced with
        /// the actual localhost port before opening the browser.
        /// Example: "https://example.com/oauth?redirect_uri=http://127.0.0.1:{port}/callback"
        auth-url: string,
        /// The path to listen on for the OAuth callback (e.g., "/callback").
        /// A localhost server will be started to receive the redirect.
        callback-path: string,
        /// Timeout in seconds to wait for the callback (default: 300 = 5 minutes).
        timeout-secs: option<u64>,
    }

    /// Result of an OAuth web authentication flow.
    record oauth-web-auth-result {
        /// The full callback URL that was received, including query parameters.
        /// The extension is responsible for parsing the code, state, etc.
        callback-url: string,
        /// The port that was used for the localhost callback server.
        port: u32,
    }

    /// Get a stored credential for this provider.
    get-credential: func(provider-id: string) -> option<string>;

    /// Store a credential for this provider.
    store-credential: func(provider-id: string, value: string) -> result<_, string>;

    /// Delete a stored credential for this provider.
    delete-credential: func(provider-id: string) -> result<_, string>;

    /// Read an environment variable.
    get-env-var: func(name: string) -> option<string>;

    /// Start an OAuth web authentication flow.
    ///
    /// This will:
    /// 1. Start a localhost server to receive the OAuth callback
    /// 2. Open the auth URL in the user's default browser
    /// 3. Wait for the callback (up to the timeout)
    /// 4. Return the callback URL with query parameters
    ///
    /// The extension is responsible for:
    /// - Constructing the auth URL with client_id, redirect_uri, scope, state, etc.
    /// - Parsing the callback URL to extract the authorization code
    /// - Exchanging the code for tokens using fetch-fallible from http-client
    oauth-start-web-auth: func(config: oauth-web-auth-config) -> result<oauth-web-auth-result, string>;

    /// Make an HTTP request for OAuth token exchange.
    ///
    /// This is a convenience wrapper around http-client's fetch-fallible for OAuth flows.
    /// Unlike the standard fetch, this does not treat non-2xx responses as errors,
    /// allowing proper handling of OAuth error responses.
    oauth-send-http-request: func(request: http-request) -> result<http-response-with-status, string>;

    /// Open a URL in the user's default browser.
    ///
    /// Useful for OAuth flows that need to open a browser but handle the
    /// callback differently (e.g., polling-based flows).
    oauth-open-browser: func(url: string) -> result<_, string>;

    /// Information needed to display the device flow prompt modal to the user.
    record device-flow-prompt-info {
        /// The user code to display (e.g., "ABC-123").
        user-code: string,
        /// The URL the user needs to visit to authorize (for the "Connect" button).
        verification-url: string,
        /// The headline text for the modal (e.g., "Use GitHub Copilot in Zed.").
        headline: string,
        /// A description to show below the headline (e.g., "Using Copilot requires an active subscription on GitHub.").
        description: string,
        /// Label for the connect button (e.g., "Connect to GitHub").
        connect-button-label: string,
        /// Success headline shown when authorization completes.
        success-headline: string,
        /// Success message shown when authorization completes.
        success-message: string,
    }
}
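
// Illustrative sketch only, not part of the interface above: since the functions in
// `llm-provider` (credential storage, environment access, OAuth helpers) are host
// capabilities, an extension world would typically import this interface. The world
// name below is an assumption for the example, not something defined by this file.
world example-llm-provider-extension {
    import llm-provider;
}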