Cargo.lock 🔗
@@ -9061,7 +9061,6 @@ dependencies = [
"editor",
"extension",
"extension_host",
- "feature_flags",
"fs",
"futures 0.3.31",
"google_ai",
Created by Marshall Bowers
This PR removes the `cloud-thinking-effort` feature flag to ship the
thinking effort UI for the Zed provider.
Release Notes:
- Added support for controlling thinking effort levels with supported
models using the Zed provider.
Cargo.lock | 1
crates/agent_ui/src/acp/thread_view.rs | 5 --
crates/agent_ui/src/acp/thread_view/active_thread.rs | 8 ----
crates/feature_flags/src/flags.rs | 12 ------
crates/language_models/Cargo.toml | 1
crates/language_models/src/provider/cloud.rs | 25 -------------
6 files changed, 2 insertions(+), 50 deletions(-)
@@ -9061,7 +9061,6 @@ dependencies = [
"editor",
"extension",
"extension_host",
- "feature_flags",
"fs",
"futures 0.3.31",
"google_ai",
@@ -20,10 +20,7 @@ use editor::scroll::Autoscroll;
use editor::{
Editor, EditorEvent, EditorMode, MultiBuffer, PathKey, SelectionEffects, SizingBehavior,
};
-use feature_flags::{
- AgentSharingFeatureFlag, AgentV2FeatureFlag, CloudThinkingEffortFeatureFlag,
- FeatureFlagAppExt as _,
-};
+use feature_flags::{AgentSharingFeatureFlag, AgentV2FeatureFlag, FeatureFlagAppExt as _};
use file_icons::FileIcons;
use fs::Fs;
use futures::FutureExt as _;
@@ -2861,10 +2861,6 @@ impl AcpThreadView {
}
fn render_thinking_control(&self, cx: &mut Context<Self>) -> Option<AnyElement> {
- if !cx.has_flag::<CloudThinkingEffortFeatureFlag>() {
- return None;
- }
-
let thread = self.as_native_thread(cx)?.read(cx);
let model = thread.model()?;
@@ -7205,10 +7201,6 @@ impl AcpThreadView {
}
fn cycle_thinking_effort(&mut self, cx: &mut Context<Self>) {
- if !cx.has_flag::<CloudThinkingEffortFeatureFlag>() {
- return;
- }
-
let Some(thread) = self.as_native_thread(cx) else {
return;
};
@@ -53,15 +53,3 @@ impl FeatureFlag for DiffReviewFeatureFlag {
false
}
}
-
-/// Controls whether we show the new thinking and effort level controls in the Agent Panel when using applicable models
-/// through the Zed provider (Cloud).
-pub struct CloudThinkingEffortFeatureFlag;
-
-impl FeatureFlag for CloudThinkingEffortFeatureFlag {
- const NAME: &'static str = "cloud-thinking-effort";
-
- fn enabled_for_staff() -> bool {
- false
- }
-}
@@ -34,7 +34,6 @@ credentials_provider.workspace = true
deepseek = { workspace = true, features = ["schemars"] }
extension.workspace = true
extension_host.workspace = true
-feature_flags.workspace = true
fs.workspace = true
futures.workspace = true
google_ai = { workspace = true, features = ["schemars"] }
@@ -9,7 +9,6 @@ use cloud_llm_client::{
CompletionEvent, CompletionRequestStatus, CountTokensBody, CountTokensResponse,
ListModelsResponse, SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
-use feature_flags::{CloudThinkingEffortFeatureFlag, FeatureFlagAppExt as _};
use futures::{
AsyncBufReadExt, FutureExt, Stream, StreamExt,
future::BoxFuture,
@@ -172,26 +171,10 @@ impl State {
}
fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
- let is_thinking_effort_enabled = cx.has_flag::<CloudThinkingEffortFeatureFlag>();
-
let mut models = Vec::new();
for model in response.models {
models.push(Arc::new(model.clone()));
-
- if !is_thinking_effort_enabled {
- // Right now we represent thinking variants of models as separate models on the client,
- // so we need to insert variants for any model that supports thinking.
- if model.supports_thinking {
- models.push(Arc::new(cloud_llm_client::LanguageModel {
- id: cloud_llm_client::LanguageModelId(
- format!("{}-thinking", model.id).into(),
- ),
- display_name: format!("{} Thinking", model.display_name),
- ..model
- }));
- }
- }
}
self.default_model = models
@@ -750,13 +733,7 @@ impl LanguageModel for CloudLanguageModel {
let intent = request.intent;
let app_version = Some(cx.update(|cx| AppVersion::global(cx)));
let thinking_allowed = request.thinking_allowed;
- let is_thinking_effort_enabled =
- cx.update(|cx| cx.has_flag::<CloudThinkingEffortFeatureFlag>());
- let enable_thinking = if is_thinking_effort_enabled {
- thinking_allowed && self.model.supports_thinking
- } else {
- thinking_allowed && self.model.id.0.ends_with("-thinking")
- };
+ let enable_thinking = thinking_allowed && self.model.supports_thinking;
let provider_name = provider_name(&self.model.provider);
match self.model.provider {
cloud_llm_client::LanguageModelProvider::Anthropic => {