diff --git a/Cargo.lock b/Cargo.lock index 0eae257836e8d804d74e6c60bd45780a8e4f879c..e443d80906e157e5f76b6df80972e492b794c8a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9061,7 +9061,6 @@ dependencies = [ "editor", "extension", "extension_host", - "feature_flags", "fs", "futures 0.3.31", "google_ai", diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index e939f053314582a42d6a009c155b243ad235802a..6431039de145a1ff762502c6230d20f0e0494271 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -20,10 +20,7 @@ use editor::scroll::Autoscroll; use editor::{ Editor, EditorEvent, EditorMode, MultiBuffer, PathKey, SelectionEffects, SizingBehavior, }; -use feature_flags::{ - AgentSharingFeatureFlag, AgentV2FeatureFlag, CloudThinkingEffortFeatureFlag, - FeatureFlagAppExt as _, -}; +use feature_flags::{AgentSharingFeatureFlag, AgentV2FeatureFlag, FeatureFlagAppExt as _}; use file_icons::FileIcons; use fs::Fs; use futures::FutureExt as _; diff --git a/crates/agent_ui/src/acp/thread_view/active_thread.rs b/crates/agent_ui/src/acp/thread_view/active_thread.rs index 264897b31887e5b19ec182be453c23d8b7e7802e..d1e8411fec939c6c5deef455cb08a64223b47a2a 100644 --- a/crates/agent_ui/src/acp/thread_view/active_thread.rs +++ b/crates/agent_ui/src/acp/thread_view/active_thread.rs @@ -2861,10 +2861,6 @@ impl AcpThreadView { } fn render_thinking_control(&self, cx: &mut Context<Self>) -> Option { - if !cx.has_flag::<CloudThinkingEffortFeatureFlag>() { - return None; - } - let thread = self.as_native_thread(cx)?.read(cx); let model = thread.model()?; @@ -7205,10 +7201,6 @@ impl AcpThreadView { } fn cycle_thinking_effort(&mut self, cx: &mut Context<Self>) { - if !cx.has_flag::<CloudThinkingEffortFeatureFlag>() { - return; - } - let Some(thread) = self.as_native_thread(cx) else { return; }; diff --git a/crates/feature_flags/src/flags.rs b/crates/feature_flags/src/flags.rs index b94264879deb87b2880ef0d62ecf08489dfa8655..6fa46e48bea0b3d6b3bc482e2bfeee3eeb0e5702 100644 ---
a/crates/feature_flags/src/flags.rs +++ b/crates/feature_flags/src/flags.rs @@ -53,15 +53,3 @@ impl FeatureFlag for DiffReviewFeatureFlag { false } } - -/// Controls whether we show the new thinking and effort level controls in the Agent Panel when using applicable models -/// through the Zed provider (Cloud). -pub struct CloudThinkingEffortFeatureFlag; - -impl FeatureFlag for CloudThinkingEffortFeatureFlag { - const NAME: &'static str = "cloud-thinking-effort"; - - fn enabled_for_staff() -> bool { - false - } -} diff --git a/crates/language_models/Cargo.toml b/crates/language_models/Cargo.toml index 9685e24085495d7b028951367b1a2b4f0808c094..ece0d68152a20cbf77d0c082746959684816f115 100644 --- a/crates/language_models/Cargo.toml +++ b/crates/language_models/Cargo.toml @@ -34,7 +34,6 @@ credentials_provider.workspace = true deepseek = { workspace = true, features = ["schemars"] } extension.workspace = true extension_host.workspace = true -feature_flags.workspace = true fs.workspace = true futures.workspace = true google_ai = { workspace = true, features = ["schemars"] } diff --git a/crates/language_models/src/provider/cloud.rs b/crates/language_models/src/provider/cloud.rs index 6caf56ebcabd1f862caf5518e33099e9160b8653..bd88d5d3b384aadef0c34e997191033045dc2de5 100644 --- a/crates/language_models/src/provider/cloud.rs +++ b/crates/language_models/src/provider/cloud.rs @@ -9,7 +9,6 @@ use cloud_llm_client::{ CompletionEvent, CompletionRequestStatus, CountTokensBody, CountTokensResponse, ListModelsResponse, SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME, }; -use feature_flags::{CloudThinkingEffortFeatureFlag, FeatureFlagAppExt as _}; use futures::{ AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, @@ -172,26 +171,10 @@ impl State { } fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) { - let is_thinking_effort_enabled = cx.has_flag::<CloudThinkingEffortFeatureFlag>(); - let mut models = Vec::new(); for model in response.models {
models.push(Arc::new(model.clone())); - - if !is_thinking_effort_enabled { - // Right now we represent thinking variants of models as separate models on the client, - // so we need to insert variants for any model that supports thinking. - if model.supports_thinking { - models.push(Arc::new(cloud_llm_client::LanguageModel { - id: cloud_llm_client::LanguageModelId( - format!("{}-thinking", model.id).into(), - ), - display_name: format!("{} Thinking", model.display_name), - ..model - })); - } - } } self.default_model = models @@ -750,13 +733,7 @@ impl LanguageModel for CloudLanguageModel { let intent = request.intent; let app_version = Some(cx.update(|cx| AppVersion::global(cx))); let thinking_allowed = request.thinking_allowed; - let is_thinking_effort_enabled = - cx.update(|cx| cx.has_flag::<CloudThinkingEffortFeatureFlag>()); - let enable_thinking = if is_thinking_effort_enabled { - thinking_allowed && self.model.supports_thinking - } else { - thinking_allowed && self.model.id.0.ends_with("-thinking") - }; + let enable_thinking = thinking_allowed && self.model.supports_thinking; let provider_name = provider_name(&self.model.provider); match self.model.provider { cloud_llm_client::LanguageModelProvider::Anthropic => {