From cb695ce71ccc0ba2a16d089ceea47ffac3562052 Mon Sep 17 00:00:00 2001
From: Richard Feldman
Date: Thu, 18 Dec 2025 20:17:48 -0500
Subject: [PATCH] Restore the since 0.8.0 stuff to origin/main

---
 .../wit/since_v0.8.0/extension.wit   | 75 -------------------
 .../wit/since_v0.8.0/http-client.wit | 17 -----
 2 files changed, 92 deletions(-)

diff --git a/crates/extension_api/wit/since_v0.8.0/extension.wit b/crates/extension_api/wit/since_v0.8.0/extension.wit
index 7440984f5d171ccfadd212760d41e15ce7325535..8195162b89a420d322970bf894bd9ec824119087 100644
--- a/crates/extension_api/wit/since_v0.8.0/extension.wit
+++ b/crates/extension_api/wit/since_v0.8.0/extension.wit
@@ -8,7 +8,6 @@ world extension {
     import platform;
     import process;
     import nodejs;
-    import llm-provider;
 
     use common.{env-vars, range};
     use context-server.{context-server-configuration};
@@ -16,11 +15,6 @@ world extension {
     use lsp.{completion, symbol};
     use process.{command};
     use slash-command.{slash-command, slash-command-argument-completion, slash-command-output};
-    use llm-provider.{
-        provider-info, model-info, completion-request,
-        cache-configuration, completion-event, token-usage,
-        device-flow-prompt-info
-    };
 
     /// Initializes the extension.
     export init-extension: func();
@@ -170,73 +164,4 @@ world extension {
     export dap-config-to-scenario: func(config: debug-config) -> result<debug-scenario, string>;
     export dap-locator-create-scenario: func(locator-name: string, build-config-template: build-task-template, resolved-label: string, debug-adapter-name: string) -> option<debug-scenario>;
     export run-dap-locator: func(locator-name: string, config: resolved-task) -> result<debug-request, string>;
-
-    /// Returns information about language model providers offered by this extension.
-    export llm-providers: func() -> list<provider-info>;
-
-    /// Returns the models available for a provider.
-    export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
-
-    /// Returns markdown content to display in the provider's settings UI.
-    /// This can include setup instructions, links to documentation, etc.
-    export llm-provider-settings-markdown: func(provider-id: string) -> option<string>;
-
-    /// Check if the provider is authenticated.
-    export llm-provider-is-authenticated: func(provider-id: string) -> bool;
-
-    /// Start an OAuth device flow sign-in.
-    /// This is called when the user explicitly clicks "Sign in with GitHub" or similar.
-    ///
-    /// The device flow works as follows:
-    /// 1. Extension requests a device code from the OAuth provider
-    /// 2. Extension returns prompt info including user code and verification URL
-    /// 3. Host displays a modal with the prompt info
-    /// 4. Host calls llm-provider-poll-device-flow-sign-in
-    /// 5. Extension polls for the access token while user authorizes in browser
-    /// 6. Once authorized, extension stores the credential and returns success
-    ///
-    /// Returns information needed to display the device flow prompt modal.
-    export llm-provider-start-device-flow-sign-in: func(provider-id: string) -> result<device-flow-prompt-info, string>;
-
-    /// Poll for device flow sign-in completion.
-    /// This is called after llm-provider-start-device-flow-sign-in returns the user code.
-    /// The extension should poll the OAuth provider until the user authorizes or the flow times out.
-    /// Returns Ok(()) on successful authentication, or an error message on failure.
-    export llm-provider-poll-device-flow-sign-in: func(provider-id: string) -> result<_, string>;
-
-    /// Reset credentials for the provider.
-    export llm-provider-reset-credentials: func(provider-id: string) -> result<_, string>;
-
-    /// Count tokens for a request.
-    export llm-count-tokens: func(
-        provider-id: string,
-        model-id: string,
-        request: completion-request
-    ) -> result<u64, string>;
-
-    /// Start streaming a completion from the model.
-    /// Returns a stream ID that can be used with llm-stream-next and llm-stream-close.
-    export llm-stream-completion-start: func(
-        provider-id: string,
-        model-id: string,
-        request: completion-request
-    ) -> result<string, string>;
-
-    /// Get the next event from a completion stream.
-    /// Returns None when the stream is complete.
-    export llm-stream-completion-next: func(
-        stream-id: string
-    ) -> result<option<completion-event>, string>;
-
-    /// Close a completion stream and release its resources.
-    export llm-stream-completion-close: func(
-        stream-id: string
-    );
-
-    /// Get cache configuration for a model (if prompt caching is supported).
-    export llm-cache-configuration: func(
-        provider-id: string,
-        model-id: string
-    ) -> option<cache-configuration>;
-
 }
diff --git a/crates/extension_api/wit/since_v0.8.0/http-client.wit b/crates/extension_api/wit/since_v0.8.0/http-client.wit
index 422ca8cd843985ccbdd7b3e663db2f0d0141f544..bb0206c17a52d4d20b99f445dca4ac606e0485f7 100644
--- a/crates/extension_api/wit/since_v0.8.0/http-client.wit
+++ b/crates/extension_api/wit/since_v0.8.0/http-client.wit
@@ -51,26 +51,9 @@ interface http-client {
         body: list<u8>,
     }
 
-    /// An HTTP response that includes the status code.
-    ///
-    /// Used by `fetch-fallible` which returns responses for all status codes
-    /// rather than treating some status codes as errors.
-    record http-response-with-status {
-        /// The HTTP status code.
-        status: u16,
-        /// The response headers.
-        headers: list<tuple<string, string>>,
-        /// The response body.
-        body: list<u8>,
-    }
-
     /// Performs an HTTP request and returns the response.
-    /// Returns an error if the response status is 4xx or 5xx.
     fetch: func(req: http-request) -> result<http-response, string>;
 
-    /// Performs an HTTP request and returns the response regardless of its status code.
-    fetch-fallible: func(req: http-request) -> result<http-response-with-status, string>;
-
     /// An HTTP response stream.
     resource http-response-stream {
         /// Retrieves the next chunk of data from the response stream.
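
For context: the removed doc comments above walk through an OAuth device flow in six steps. The Rust sketch below is purely illustrative of that host/extension call sequence; DeviceFlowPromptInfo, start_device_flow_sign_in, poll_device_flow_sign_in, and sign_in are hypothetical stand-ins for calls into the corresponding WIT exports, not real Zed or zed_extension_api symbols.

    // Illustrative sketch of the device flow described in the removed doc comments.
    // Every name below is a hypothetical stand-in, not a real Zed API.

    struct DeviceFlowPromptInfo {
        user_code: String,        // code the user types in the browser
        verification_url: String, // URL the user visits to authorize
    }

    // Stand-in for `llm-provider-start-device-flow-sign-in`: the extension requests
    // a device code from the OAuth provider and returns the prompt info (steps 1-2).
    fn start_device_flow_sign_in(_provider_id: &str) -> Result<DeviceFlowPromptInfo, String> {
        unimplemented!("would call the extension's WIT export")
    }

    // Stand-in for `llm-provider-poll-device-flow-sign-in`: the extension polls the
    // OAuth provider until the user authorizes or the flow times out, storing the
    // credential on success (steps 5-6).
    fn poll_device_flow_sign_in(_provider_id: &str) -> Result<(), String> {
        unimplemented!("would call the extension's WIT export")
    }

    fn sign_in(provider_id: &str) -> Result<(), String> {
        // Steps 1-2: obtain the user code and verification URL from the extension.
        let prompt = start_device_flow_sign_in(provider_id)?;

        // Step 3: the host displays these to the user (a modal in the real flow).
        println!("Visit {} and enter code {}", prompt.verification_url, prompt.user_code);

        // Steps 4-6: block on polling until authorization completes or fails.
        poll_device_flow_sign_in(provider_id)
    }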