Address feedback

Created by Jakub Konka

Change summary

crates/agent/src/native_agent_server.rs                    |  4 +-
crates/agent/src/thread.rs                                 |  2 +-
crates/agent/src/tools/web_search_tool.rs                  |  2 +-
crates/agent_ui/src/agent_configuration.rs                 |  2 +-
crates/agent_ui/src/agent_panel.rs                         |  4 +-
crates/ai_onboarding/src/agent_api_keys_onboarding.rs      |  2 +-
crates/ai_onboarding/src/agent_panel_onboarding_content.rs |  2 +-
crates/language_model/src/language_model.rs                | 11 ++-----
crates/language_models/src/provider/anthropic.rs           |  4 +-
crates/language_models/src/provider/anthropic/telemetry.rs |  2 +-
crates/language_models/src/provider/cloud.rs               | 12 +++----
crates/language_models/src/provider/google.rs              |  7 ++--
crates/language_models/src/provider/open_ai.rs             |  4 +-
13 files changed, 25 insertions(+), 33 deletions(-)

Detailed changes

crates/agent/src/native_agent_server.rs 🔗

@@ -112,7 +112,7 @@ mod tests {
                 prompt_store::init(cx);
                 let registry = language_model::LanguageModelRegistry::read_global(cx);
                 let auth = registry
-                    .provider(&language_model::provider::ANTHROPIC_PROVIDER_ID)
+                    .provider(&language_model::ANTHROPIC_PROVIDER_ID)
                     .unwrap()
                     .authenticate(cx);
 
@@ -127,7 +127,7 @@ mod tests {
                 registry.update(cx, |registry, cx| {
                     registry.select_default_model(
                         Some(&language_model::SelectedModel {
-                            provider: language_model::provider::ANTHROPIC_PROVIDER_ID,
+                            provider: language_model::ANTHROPIC_PROVIDER_ID,
                             model: language_model::LanguageModelId("claude-sonnet-4-latest".into()),
                         }),
                         cx,

crates/agent/src/thread.rs 🔗

@@ -39,7 +39,7 @@ use language_model::{
     LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
     LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
     LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, Speed, StopReason,
-    TokenUsage, provider::ZED_CLOUD_PROVIDER_ID,
+    TokenUsage, ZED_CLOUD_PROVIDER_ID,
 };
 use project::Project;
 use prompt_store::ProjectContext;

crates/agent/src/tools/web_search_tool.rs 🔗

@@ -11,7 +11,7 @@ use cloud_llm_client::WebSearchResponse;
 use futures::FutureExt as _;
 use gpui::{App, Task};
 use language_model::{
-    LanguageModelProviderId, LanguageModelToolResultContent, provider::ZED_CLOUD_PROVIDER_ID,
+    LanguageModelProviderId, LanguageModelToolResultContent, ZED_CLOUD_PROVIDER_ID,
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

crates/agent_ui/src/agent_configuration.rs 🔗

@@ -23,7 +23,7 @@ use itertools::Itertools;
 use language::LanguageRegistry;
 use language_model::{
     IconOrSvg, LanguageModelProvider, LanguageModelProviderId, LanguageModelRegistry,
-    provider::ZED_CLOUD_PROVIDER_ID,
+    ZED_CLOUD_PROVIDER_ID,
 };
 use language_models::AllLanguageModelSettings;
 use notifications::status_toast::{StatusToast, ToastIcon};

crates/agent_ui/src/agent_panel.rs 🔗

@@ -3726,7 +3726,7 @@ impl AgentPanel {
                     .read(cx)
                     .default_model()
                     .is_some_and(|model| {
-                        model.provider.id() != language_model::provider::ZED_CLOUD_PROVIDER_ID
+                        model.provider.id() != language_model::ZED_CLOUD_PROVIDER_ID
                     })
                 {
                     return false;
@@ -3767,7 +3767,7 @@ impl AgentPanel {
             .iter()
             .any(|provider| {
                 provider.is_authenticated(cx)
-                    && provider.id() != language_model::provider::ZED_CLOUD_PROVIDER_ID
+                    && provider.id() != language_model::ZED_CLOUD_PROVIDER_ID
             });
 
         match &self.active_view {

crates/ai_onboarding/src/agent_api_keys_onboarding.rs 🔗

@@ -1,5 +1,5 @@
 use gpui::{Action, IntoElement, ParentElement, RenderOnce, point};
-use language_model::{IconOrSvg, LanguageModelRegistry, provider::ZED_CLOUD_PROVIDER_ID};
+use language_model::{IconOrSvg, LanguageModelRegistry, ZED_CLOUD_PROVIDER_ID};
 use ui::{Divider, List, ListBulletItem, prelude::*};
 
 pub struct ApiKeysWithProviders {

crates/ai_onboarding/src/agent_panel_onboarding_content.rs 🔗

@@ -3,7 +3,7 @@ use std::sync::Arc;
 use client::{Client, UserStore};
 use cloud_api_types::Plan;
 use gpui::{Entity, IntoElement, ParentElement};
-use language_model::{LanguageModelRegistry, provider::ZED_CLOUD_PROVIDER_ID};
+use language_model::{LanguageModelRegistry, ZED_CLOUD_PROVIDER_ID};
 use ui::prelude::*;
 
 use crate::{AgentPanelOnboardingCard, ApiKeysWithoutProviders, ZedAiOnboarding};

crates/language_model/src/language_model.rs 🔗

@@ -1,6 +1,6 @@
 mod api_key;
 mod model;
-pub mod provider;
+mod provider;
 mod rate_limiter;
 mod registry;
 mod request;
@@ -20,7 +20,6 @@ use gpui::{AnyView, App, AsyncApp, Entity, SharedString, Task, Window};
 use http_client::{StatusCode, http};
 use icons::IconName;
 use parking_lot::Mutex;
-use provider::parse_prompt_too_long;
 use serde::{Deserialize, Serialize};
 use std::ops::{Add, Sub};
 use std::str::FromStr;
@@ -37,6 +36,7 @@ pub use crate::registry::*;
 pub use crate::request::*;
 pub use crate::role::*;
 pub use crate::tool_schema::LanguageModelToolSchemaFormat;
+pub use provider::*;
 pub use zed_env_vars::{EnvVar, env_var};
 
 pub fn init(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) {
@@ -252,12 +252,7 @@ impl LanguageModelCompletionError {
             .strip_prefix("http_")
             .and_then(|code| StatusCode::from_str(code).ok())
         {
-            Self::from_http_status(
-                provider::ZED_CLOUD_PROVIDER_NAME,
-                status_code,
-                message,
-                retry_after,
-            )
+            Self::from_http_status(ZED_CLOUD_PROVIDER_NAME, status_code, message, retry_after)
         } else {
             anyhow!("completion request failed, code: {code}, message: {message}").into()
         }

crates/language_models/src/provider/anthropic.rs 🔗

@@ -10,13 +10,13 @@ use futures::{FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream
 use gpui::{AnyView, App, AsyncApp, Context, Entity, Task};
 use http_client::HttpClient;
 use language_model::{
-    ApiKeyState, AuthenticateError, ConfigurationViewTargetAgent, EnvVar, IconOrSvg, LanguageModel,
+    ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME, ApiKeyState, AuthenticateError,
+    ConfigurationViewTargetAgent, EnvVar, IconOrSvg, LanguageModel,
     LanguageModelCacheConfiguration, LanguageModelCompletionError, LanguageModelCompletionEvent,
     LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
     LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
     LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
     RateLimiter, Role, StopReason, env_var,
-    provider::{ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME},
 };
 use settings::{Settings, SettingsStore};
 use std::pin::Pin;

crates/language_models/src/provider/anthropic/telemetry.rs 🔗

@@ -2,7 +2,7 @@ use anthropic::ANTHROPIC_API_URL;
 use anyhow::{Context as _, anyhow};
 use gpui::BackgroundExecutor;
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
-use language_model::{LanguageModel, provider::ANTHROPIC_PROVIDER_ID};
+use language_model::{ANTHROPIC_PROVIDER_ID, LanguageModel};
 use std::env;
 use std::sync::Arc;
 use util::ResultExt;

crates/language_models/src/provider/cloud.rs 🔗

@@ -19,17 +19,15 @@ use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Ta
 use http_client::http::{HeaderMap, HeaderValue};
 use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
 use language_model::{
-    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
+    ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME, AuthenticateError, GOOGLE_PROVIDER_ID,
+    GOOGLE_PROVIDER_NAME, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
     LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
     LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
     LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
     LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
-    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
-    provider::{
-        ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME, GOOGLE_PROVIDER_ID, GOOGLE_PROVIDER_NAME,
-        OPEN_AI_PROVIDER_ID, OPEN_AI_PROVIDER_NAME, X_AI_PROVIDER_ID, X_AI_PROVIDER_NAME,
-        ZED_CLOUD_PROVIDER_ID, ZED_CLOUD_PROVIDER_NAME,
-    },
+    OPEN_AI_PROVIDER_ID, OPEN_AI_PROVIDER_NAME, PaymentRequiredError, RateLimiter,
+    RefreshLlmTokenListener, X_AI_PROVIDER_ID, X_AI_PROVIDER_NAME, ZED_CLOUD_PROVIDER_ID,
+    ZED_CLOUD_PROVIDER_NAME,
 };
 use release_channel::AppVersion;
 use schemars::JsonSchema;

crates/language_models/src/provider/google.rs 🔗

@@ -13,10 +13,9 @@ use language_model::{
     LanguageModelToolUse, LanguageModelToolUseId, MessageContent, StopReason,
 };
 use language_model::{
-    IconOrSvg, LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
-    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
-    LanguageModelRequest, RateLimiter, Role,
-    provider::{GOOGLE_PROVIDER_ID, GOOGLE_PROVIDER_NAME},
+    GOOGLE_PROVIDER_ID, GOOGLE_PROVIDER_NAME, IconOrSvg, LanguageModel, LanguageModelId,
+    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
+    LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

crates/language_models/src/provider/open_ai.rs 🔗

@@ -10,8 +10,8 @@ use language_model::{
     LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
     LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
     LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse,
-    LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage, env_var,
-    provider::{OPEN_AI_PROVIDER_ID, OPEN_AI_PROVIDER_NAME},
+    LanguageModelToolUseId, MessageContent, OPEN_AI_PROVIDER_ID, OPEN_AI_PROVIDER_NAME,
+    RateLimiter, Role, StopReason, TokenUsage, env_var,
 };
 use menu;
 use open_ai::responses::{