@@ -35,20 +35,19 @@ pub use wit::{
DeviceFlowPromptInfo as LlmDeviceFlowPromptInfo, ImageData as LlmImageData,
MessageContent as LlmMessageContent, MessageRole as LlmMessageRole,
ModelCapabilities as LlmModelCapabilities, ModelInfo as LlmModelInfo,
- OauthWebAuthConfig as LlmOauthWebAuthConfig,
- OauthWebAuthResult as LlmOauthWebAuthResult, ProviderInfo as LlmProviderInfo,
- ProviderSettings as LlmProviderSettings, RequestMessage as LlmRequestMessage,
- StopReason as LlmStopReason, ThinkingContent as LlmThinkingContent,
- TokenUsage as LlmTokenUsage, ToolChoice as LlmToolChoice,
- ToolDefinition as LlmToolDefinition, ToolInputFormat as LlmToolInputFormat,
- ToolResult as LlmToolResult, ToolResultContent as LlmToolResultContent,
- ToolUse as LlmToolUse, ToolUseJsonParseError as LlmToolUseJsonParseError,
+ OauthWebAuthConfig as LlmOauthWebAuthConfig, OauthWebAuthResult as LlmOauthWebAuthResult,
+ ProviderInfo as LlmProviderInfo, ProviderSettings as LlmProviderSettings,
+ RequestMessage as LlmRequestMessage, StopReason as LlmStopReason,
+ ThinkingContent as LlmThinkingContent, TokenUsage as LlmTokenUsage,
+ ToolChoice as LlmToolChoice, ToolDefinition as LlmToolDefinition,
+ ToolInputFormat as LlmToolInputFormat, ToolResult as LlmToolResult,
+ ToolResultContent as LlmToolResultContent, ToolUse as LlmToolUse,
+ ToolUseJsonParseError as LlmToolUseJsonParseError,
delete_credential as llm_delete_credential, get_credential as llm_get_credential,
get_env_var as llm_get_env_var, get_provider_settings as llm_get_provider_settings,
oauth_open_browser as llm_oauth_open_browser,
oauth_send_http_request as llm_oauth_send_http_request,
- oauth_start_web_auth as llm_oauth_start_web_auth,
- store_credential as llm_store_credential,
+ oauth_start_web_auth as llm_oauth_start_web_auth, store_credential as llm_store_credential,
},
zed::extension::nodejs::{
node_binary_path, npm_install_package, npm_package_installed_version,
@@ -10,8 +10,8 @@ use crate::wasm_host::wit::{
LlmStopReason, LlmThinkingContent, LlmToolChoice, LlmToolDefinition, LlmToolInputFormat,
LlmToolResult, LlmToolResultContent, LlmToolUse,
};
-use collections::HashMap;
use anyhow::{Result, anyhow};
+use collections::HashMap;
use credentials_provider::CredentialsProvider;
use extension::{LanguageModelAuthConfig, OAuthConfig};
use futures::future::BoxFuture;
@@ -29,8 +29,8 @@ use language_model::{
LanguageModelCacheConfiguration, LanguageModelCompletionError, LanguageModelCompletionEvent,
LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
- LanguageModelToolChoice, LanguageModelToolUse, LanguageModelToolUseId, RateLimiter,
- StopReason, TokenUsage,
+ LanguageModelToolChoice, LanguageModelToolUse, LanguageModelToolUseId, RateLimiter, StopReason,
+ TokenUsage,
};
use markdown::{HeadingLevelStyles, Markdown, MarkdownElement, MarkdownStyle};
use settings::Settings;
@@ -150,13 +150,14 @@ impl ExtensionLanguageModelProvider {
model_info: &LlmModelInfo,
cache_configs: &HashMap<String, LlmCacheConfiguration>,
) -> Arc<dyn LanguageModel> {
- let cache_config = cache_configs.get(&model_info.id).map(|config| {
- LanguageModelCacheConfiguration {
- max_cache_anchors: config.max_cache_anchors as usize,
- should_speculate: false,
- min_total_token: config.min_total_token_count,
- }
- });
+ let cache_config =
+ cache_configs
+ .get(&model_info.id)
+ .map(|config| LanguageModelCacheConfiguration {
+ max_cache_anchors: config.max_cache_anchors as usize,
+ should_speculate: false,
+ min_total_token: config.min_total_token_count,
+                });

        Arc::new(ExtensionLanguageModel {
extension: self.extension.clone(),
@@ -1640,9 +1641,7 @@ impl LanguageModel for ExtensionLanguageModel {
fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
match self.model_info.capabilities.tool_input_format {
LlmToolInputFormat::JsonSchema => LanguageModelToolSchemaFormat::JsonSchema,
- LlmToolInputFormat::JsonSchemaSubset => {
- LanguageModelToolSchemaFormat::JsonSchemaSubset
- }
+ LlmToolInputFormat::JsonSchemaSubset => LanguageModelToolSchemaFormat::JsonSchemaSubset,
LlmToolInputFormat::Simplified => LanguageModelToolSchemaFormat::JsonSchema,
}
}