From 7ce4f2ae622ab25ff1e83c1aed19ae9d45b7bee5 Mon Sep 17 00:00:00 2001 From: morgankrey Date: Mon, 24 Nov 2025 13:38:14 -0600 Subject: [PATCH 001/749] Opus 4.5 and Gemini 3 to docs (#43424) Add Opus 4.5 and Gemini 3 to docs Release Notes: - N/A --- docs/src/ai/models.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/src/ai/models.md b/docs/src/ai/models.md index 5b379fc75435c14ac46587f7449c7a5c54becfcf..6033bf23fad372b15909ff09a43f2747f3e619c0 100644 --- a/docs/src/ai/models.md +++ b/docs/src/ai/models.md @@ -5,6 +5,10 @@ We’re working hard to expand the models supported by Zed’s subscription offe | Model | Provider | Token Type | Provider Price per 1M tokens | Zed Price per 1M tokens | | ---------------------- | --------- | ------------------- | ---------------------------- | ----------------------- | +| Claude Opus 4.5 | Anthropic | Input | $5.00 | $5.50 | +| | Anthropic | Output | $25.00 | $27.50 | +| | Anthropic | Input - Cache Write | $6.25 | $6.875 | +| | Anthropic | Input - Cache Read | $0.50 | $0.55 | | Claude Opus 4.1 | Anthropic | Input | $15.00 | $16.50 | | | Anthropic | Output | $75.00 | $82.50 | | | Anthropic | Input - Cache Write | $18.75 | $20.625 | @@ -34,6 +38,8 @@ We’re working hard to expand the models supported by Zed’s subscription offe | GPT-5 nano | OpenAI | Input | $0.05 | $0.055 | | | OpenAI | Output | $0.40 | $0.44 | | | OpenAI | Cached Input | $0.005 | $0.0055 | +| Gemini 3.0 Pro | Google | Input | $2.00 | $2.20 | +| | Google | Output | $12.00 | $13.20 | | Gemini 2.5 Pro | Google | Input | $1.25 | $1.375 | | | Google | Output | $10.00 | $11.00 | | Gemini 2.5 Flash | Google | Input | $0.30 | $0.33 | @@ -63,6 +69,7 @@ A context window is the maximum span of text and code an LLM can consider at onc | Model | Provider | Zed-Hosted Context Window | | ----------------- | --------- | ------------------------- | +| Claude Opus 4.5 | Anthropic | 200k | | Claude Opus 4.1 | Anthropic | 200k | | Claude Sonnet 4 | Anthropic | 200k | | 
Claude Sonnet 3.7 | Anthropic | 200k | @@ -72,6 +79,7 @@ A context window is the maximum span of text and code an LLM can consider at onc | GPT-5 nano | OpenAI | 400k | | Gemini 2.5 Pro | Google | 200k | | Gemini 2.5 Flash | Google | 200k | +| Gemini 3.0 Pro | Google | 200k | > We're planning on expanding supported context windows for hosted Sonnet 4 and Gemini 2.5 Pro/Flash in the near future. Stay tuned! From d295ff4f04f0b38eca419c04bc3ac128474d92fc Mon Sep 17 00:00:00 2001 From: localcc Date: Mon, 24 Nov 2025 20:48:16 +0100 Subject: [PATCH 002/749] Improve Windows path canonicalization (#43423) Path canonicalization on windows will now favor keeping the drive letter intact when canonicalizing paths. This helps some lsps with mapped network drive compatibility. Closes #41336 Release Notes: - N/A --- crates/fs/src/fs.rs | 77 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 33cc83a7886349a537a87d4b6c8bb3f5211608fc..93192ecd2bd2449dafa622a69045be6811a43cf7 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -421,6 +421,75 @@ impl RealFs { job_event_subscribers: Arc::new(Mutex::new(Vec::new())), } } + + #[cfg(target_os = "windows")] + fn canonicalize(path: &Path) -> Result { + let mut strip_prefix = None; + + let mut new_path = PathBuf::new(); + for component in path.components() { + match component { + std::path::Component::Prefix(_) => { + let canonicalized = std::fs::canonicalize(component)?; + + let mut strip = PathBuf::new(); + for component in canonicalized.components() { + match component { + Component::Prefix(prefix_component) => { + match prefix_component.kind() { + std::path::Prefix::Verbatim(os_str) => { + strip.push(os_str); + } + std::path::Prefix::VerbatimUNC(host, share) => { + strip.push("\\\\"); + strip.push(host); + strip.push(share); + } + std::path::Prefix::VerbatimDisk(disk) => { + strip.push(format!("{}:", disk as char)); + } + _ 
=> strip.push(component), + }; + } + _ => strip.push(component), + } + } + strip_prefix = Some(strip); + new_path.push(component); + } + std::path::Component::RootDir => { + new_path.push(component); + } + std::path::Component::CurDir => { + if strip_prefix.is_none() { + // unrooted path + new_path.push(component); + } + } + std::path::Component::ParentDir => { + if strip_prefix.is_some() { + // rooted path + new_path.pop(); + } else { + new_path.push(component); + } + } + std::path::Component::Normal(_) => { + if let Ok(link) = std::fs::read_link(new_path.join(component)) { + let link = match &strip_prefix { + Some(e) => link.strip_prefix(e).unwrap_or(&link), + None => &link, + }; + new_path.extend(link); + } else { + new_path.push(component); + } + } + } + } + + Ok(new_path) + } } #[async_trait::async_trait] @@ -749,7 +818,13 @@ impl Fs for RealFs { let path = path.to_owned(); self.executor .spawn(async move { - std::fs::canonicalize(&path).with_context(|| format!("canonicalizing {path:?}")) + #[cfg(target_os = "windows")] + let result = Self::canonicalize(&path); + + #[cfg(not(target_os = "windows"))] + let result = std::fs::canonicalize(&path); + + result.with_context(|| format!("canonicalizing {path:?}")) }) .await } From bd2c1027fac828b3b6cecb37a51bacc553f431b2 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 24 Nov 2025 12:01:43 -0800 Subject: [PATCH 003/749] Add support for Opus 4.5 (#43425) Adds support for Opus 4.5 - [x] BYOK - [x] Amazon Bedrock Release Notes: - Added support for Opus 4.5 Co-authored-by: Richard Feldman --- crates/anthropic/src/anthropic.rs | 30 +++++++++++++++ crates/bedrock/src/models.rs | 62 ++++++++++++++++++++++++++----- 2 files changed, 82 insertions(+), 10 deletions(-) diff --git a/crates/anthropic/src/anthropic.rs b/crates/anthropic/src/anthropic.rs index d4f89808379b0bf10c8f3eaa22484b61fd8c26f1..041401418c427251a944fc39bb8ac83a0e22bc13 100644 --- a/crates/anthropic/src/anthropic.rs +++ b/crates/anthropic/src/anthropic.rs 
@@ -67,6 +67,13 @@ pub enum Model { alias = "claude-opus-4-1-thinking-latest" )] ClaudeOpus4_1Thinking, + #[serde(rename = "claude-opus-4-5", alias = "claude-opus-4-5-latest")] + ClaudeOpus4_5, + #[serde( + rename = "claude-opus-4-5-thinking", + alias = "claude-opus-4-5-thinking-latest" + )] + ClaudeOpus4_5Thinking, #[serde(rename = "claude-sonnet-4", alias = "claude-sonnet-4-latest")] ClaudeSonnet4, #[serde( @@ -131,6 +138,14 @@ impl Model { } pub fn from_id(id: &str) -> Result { + if id.starts_with("claude-opus-4-5-thinking") { + return Ok(Self::ClaudeOpus4_5Thinking); + } + + if id.starts_with("claude-opus-4-5") { + return Ok(Self::ClaudeOpus4_5); + } + if id.starts_with("claude-opus-4-1-thinking") { return Ok(Self::ClaudeOpus4_1Thinking); } @@ -208,6 +223,8 @@ impl Model { Self::ClaudeOpus4_1 => "claude-opus-4-1-latest", Self::ClaudeOpus4Thinking => "claude-opus-4-thinking-latest", Self::ClaudeOpus4_1Thinking => "claude-opus-4-1-thinking-latest", + Self::ClaudeOpus4_5 => "claude-opus-4-5-latest", + Self::ClaudeOpus4_5Thinking => "claude-opus-4-5-thinking-latest", Self::ClaudeSonnet4 => "claude-sonnet-4-latest", Self::ClaudeSonnet4Thinking => "claude-sonnet-4-thinking-latest", Self::ClaudeSonnet4_5 => "claude-sonnet-4-5-latest", @@ -230,6 +247,7 @@ impl Model { match self { Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking => "claude-opus-4-20250514", Self::ClaudeOpus4_1 | Self::ClaudeOpus4_1Thinking => "claude-opus-4-1-20250805", + Self::ClaudeOpus4_5 | Self::ClaudeOpus4_5Thinking => "claude-opus-4-5-20251101", Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking => "claude-sonnet-4-20250514", Self::ClaudeSonnet4_5 | Self::ClaudeSonnet4_5Thinking => "claude-sonnet-4-5-20250929", Self::Claude3_5Sonnet => "claude-3-5-sonnet-latest", @@ -249,6 +267,8 @@ impl Model { Self::ClaudeOpus4_1 => "Claude Opus 4.1", Self::ClaudeOpus4Thinking => "Claude Opus 4 Thinking", Self::ClaudeOpus4_1Thinking => "Claude Opus 4.1 Thinking", + Self::ClaudeOpus4_5 => "Claude Opus 4.5", + 
Self::ClaudeOpus4_5Thinking => "Claude Opus 4.5 Thinking", Self::ClaudeSonnet4 => "Claude Sonnet 4", Self::ClaudeSonnet4Thinking => "Claude Sonnet 4 Thinking", Self::ClaudeSonnet4_5 => "Claude Sonnet 4.5", @@ -274,6 +294,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -303,6 +325,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -326,6 +350,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -348,6 +374,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -372,6 +400,7 @@ impl Model { match self { Self::ClaudeOpus4 | Self::ClaudeOpus4_1 + | Self::ClaudeOpus4_5 | Self::ClaudeSonnet4 | Self::ClaudeSonnet4_5 | Self::Claude3_5Sonnet @@ -383,6 +412,7 @@ impl Model { | Self::Claude3Haiku => AnthropicModelMode::Default, Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5Thinking | Self::ClaudeHaiku4_5Thinking diff --git a/crates/bedrock/src/models.rs b/crates/bedrock/src/models.rs index 1691ffe199975983fbb40b781aac00a2703871ea..f3b276a8d2f30e8062931e76608bbc3a302ad734 100644 --- a/crates/bedrock/src/models.rs +++ b/crates/bedrock/src/models.rs @@ -51,6 +51,13 @@ pub enum Model { alias = "claude-opus-4-1-thinking-latest" )] ClaudeOpus4_1Thinking, + #[serde(rename = 
"claude-opus-4-5", alias = "claude-opus-4-5-latest")] + ClaudeOpus4_5, + #[serde( + rename = "claude-opus-4-5-thinking", + alias = "claude-opus-4-5-thinking-latest" + )] + ClaudeOpus4_5Thinking, #[serde(rename = "claude-3-5-sonnet-v2", alias = "claude-3-5-sonnet-latest")] Claude3_5SonnetV2, #[serde(rename = "claude-3-7-sonnet", alias = "claude-3-7-sonnet-latest")] @@ -141,7 +148,19 @@ impl Model { } pub fn from_id(id: &str) -> anyhow::Result { - if id.starts_with("claude-3-5-sonnet-v2") { + if id.starts_with("claude-opus-4-5-thinking") { + Ok(Self::ClaudeOpus4_5Thinking) + } else if id.starts_with("claude-opus-4-5") { + Ok(Self::ClaudeOpus4_5) + } else if id.starts_with("claude-opus-4-1-thinking") { + Ok(Self::ClaudeOpus4_1Thinking) + } else if id.starts_with("claude-opus-4-1") { + Ok(Self::ClaudeOpus4_1) + } else if id.starts_with("claude-opus-4-thinking") { + Ok(Self::ClaudeOpus4Thinking) + } else if id.starts_with("claude-opus-4") { + Ok(Self::ClaudeOpus4) + } else if id.starts_with("claude-3-5-sonnet-v2") { Ok(Self::Claude3_5SonnetV2) } else if id.starts_with("claude-3-opus") { Ok(Self::Claude3Opus) @@ -178,6 +197,8 @@ impl Model { Model::ClaudeOpus4_1 => "claude-opus-4-1", Model::ClaudeOpus4Thinking => "claude-opus-4-thinking", Model::ClaudeOpus4_1Thinking => "claude-opus-4-1-thinking", + Model::ClaudeOpus4_5 => "claude-opus-4-5", + Model::ClaudeOpus4_5Thinking => "claude-opus-4-5-thinking", Model::Claude3_5SonnetV2 => "claude-3-5-sonnet-v2", Model::Claude3_5Sonnet => "claude-3-5-sonnet", Model::Claude3Opus => "claude-3-opus", @@ -245,6 +266,9 @@ impl Model { Model::ClaudeOpus4_1 | Model::ClaudeOpus4_1Thinking => { "anthropic.claude-opus-4-1-20250805-v1:0" } + Model::ClaudeOpus4_5 | Model::ClaudeOpus4_5Thinking => { + "anthropic.claude-opus-4-5-20251101-v1:0" + } Model::Claude3_5SonnetV2 => "anthropic.claude-3-5-sonnet-20241022-v2:0", Model::Claude3_5Sonnet => "anthropic.claude-3-5-sonnet-20240620-v1:0", Model::Claude3Opus => 
"anthropic.claude-3-opus-20240229-v1:0", @@ -309,6 +333,8 @@ impl Model { Self::ClaudeOpus4_1 => "Claude Opus 4.1", Self::ClaudeOpus4Thinking => "Claude Opus 4 Thinking", Self::ClaudeOpus4_1Thinking => "Claude Opus 4.1 Thinking", + Self::ClaudeOpus4_5 => "Claude Opus 4.5", + Self::ClaudeOpus4_5Thinking => "Claude Opus 4.5 Thinking", Self::Claude3_5SonnetV2 => "Claude 3.5 Sonnet v2", Self::Claude3_5Sonnet => "Claude 3.5 Sonnet", Self::Claude3Opus => "Claude 3 Opus", @@ -379,7 +405,9 @@ impl Model { | Self::ClaudeSonnet4_5 | Self::ClaudeSonnet4_5Thinking | Self::ClaudeOpus4Thinking - | Self::ClaudeOpus4_1Thinking => 200_000, + | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => 200_000, Self::AmazonNovaPremier => 1_000_000, Self::PalmyraWriterX5 => 1_000_000, Self::PalmyraWriterX4 => 128_000, @@ -393,7 +421,11 @@ impl Model { Self::Claude3Opus | Self::Claude3Sonnet | Self::Claude3_5Haiku => 4_096, Self::Claude3_7Sonnet | Self::Claude3_7SonnetThinking => 128_000, Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking => 64_000, - Self::ClaudeSonnet4_5 | Self::ClaudeSonnet4_5Thinking | Self::ClaudeHaiku4_5 => 64_000, + Self::ClaudeSonnet4_5 + | Self::ClaudeSonnet4_5Thinking + | Self::ClaudeHaiku4_5 + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => 64_000, Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 @@ -418,6 +450,8 @@ impl Model { | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -443,6 +477,8 @@ impl Model { | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -484,7 +520,9 @@ impl Model { | Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 - | 
Self::ClaudeOpus4_1Thinking => true, + | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => true, // Custom models - check if they have cache configuration Self::Custom { @@ -506,7 +544,9 @@ impl Model { | Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 - | Self::ClaudeOpus4_1Thinking => Some(BedrockModelCacheConfiguration { + | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => Some(BedrockModelCacheConfiguration { max_cache_anchors: 4, min_total_token: 1024, }), @@ -535,11 +575,11 @@ impl Model { budget_tokens: Some(4096), } } - Model::ClaudeOpus4Thinking | Model::ClaudeOpus4_1Thinking => { - BedrockModelMode::Thinking { - budget_tokens: Some(4096), - } - } + Model::ClaudeOpus4Thinking + | Model::ClaudeOpus4_1Thinking + | Model::ClaudeOpus4_5Thinking => BedrockModelMode::Thinking { + budget_tokens: Some(4096), + }, _ => BedrockModelMode::Default, } } @@ -593,6 +633,8 @@ impl Model { | Model::ClaudeOpus4Thinking | Model::ClaudeOpus4_1 | Model::ClaudeOpus4_1Thinking + | Model::ClaudeOpus4_5 + | Model::ClaudeOpus4_5Thinking | Model::Claude3Haiku | Model::Claude3Opus | Model::Claude3Sonnet From 342eba6f220625c015d00334c6bc354f0e2c52e1 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Tue, 25 Nov 2025 02:46:35 +0530 Subject: [PATCH 004/749] project: Send LSP metadata to remote ServerInfo (#42831) Closes #39582 Release Notes: - Added LSP metadata to remote ServerInfo Here's the before/after: https://github.com/user-attachments/assets/1057faa5-82af-4975-abad-5e10e139fac1 --------- Co-authored-by: Kirill Bulatov --- crates/language_tools/src/lsp_log_view.rs | 82 +++++++++++++---------- crates/project/src/lsp_store.rs | 46 ++++++++++++- crates/project/src/project.rs | 45 ++++++++++--- crates/proto/proto/lsp.proto | 8 +++ 4 files changed, 136 insertions(+), 45 deletions(-) diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index 
3f99a3e83413691c3893b184406f6e2569062623..c7aa78067294cbb63266e55a0d05d7abbeefddc2 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -7,12 +7,15 @@ use gpui::{ }; use language::{LanguageServerId, language_settings::SoftWrap}; use lsp::{ - LanguageServer, LanguageServerBinary, LanguageServerName, LanguageServerSelector, MessageType, - SetTraceParams, TraceValue, notification::SetTrace, + LanguageServer, LanguageServerName, LanguageServerSelector, MessageType, SetTraceParams, + TraceValue, notification::SetTrace, }; use project::{ - Project, - lsp_store::log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, + LanguageServerStatus, Project, + lsp_store::{ + LanguageServerBinaryInfo, + log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, + }, search::SearchQuery, }; use proto::toggle_lsp_logs::LogType; @@ -337,16 +340,28 @@ impl LspLogView { * Capabilities: {CAPABILITIES} * Configuration: {CONFIGURATION}", - NAME = info.name, + NAME = info.status.name, ID = info.id, - BINARY = info - .binary - .as_ref() - .map_or_else(|| "Unknown".to_string(), |binary| format!("{binary:#?}")), - WORKSPACE_FOLDERS = info.workspace_folders.join(", "), + BINARY = info.status.binary.as_ref().map_or_else( + || "Unknown".to_string(), + |binary| serde_json::to_string_pretty(binary) + .unwrap_or_else(|e| format!("Failed to serialize binary info: {e:#}")) + ), + WORKSPACE_FOLDERS = info + .status + .workspace_folders + .iter() + .filter_map(|uri| { + uri.to_file_path() + .ok() + .map(|path| path.to_string_lossy().into_owned()) + }) + .collect::>() + .join(", "), CAPABILITIES = serde_json::to_string_pretty(&info.capabilities) .unwrap_or_else(|e| format!("Failed to serialize capabilities: {e}")), CONFIGURATION = info + .status .configuration .map(|configuration| serde_json::to_string_pretty(&configuration)) .transpose() @@ -633,17 +648,12 @@ impl LspLogView { .or_else(move || { let capabilities = 
lsp_store.lsp_server_capabilities.get(&server_id)?.clone(); - let name = lsp_store - .language_server_statuses - .get(&server_id) - .map(|status| status.name.clone())?; + let status = lsp_store.language_server_statuses.get(&server_id)?.clone(); + Some(ServerInfo { id: server_id, capabilities, - binary: None, - name, - workspace_folders: Vec::new(), - configuration: None, + status, }) }) }) @@ -1314,10 +1324,7 @@ impl LspLogToolbarItemView { struct ServerInfo { id: LanguageServerId, capabilities: lsp::ServerCapabilities, - binary: Option, - name: LanguageServerName, - workspace_folders: Vec, - configuration: Option, + status: LanguageServerStatus, } impl ServerInfo { @@ -1325,18 +1332,25 @@ impl ServerInfo { Self { id: server.server_id(), capabilities: server.capabilities(), - binary: Some(server.binary().clone()), - name: server.name(), - workspace_folders: server - .workspace_folders() - .into_iter() - .filter_map(|path| { - path.to_file_path() - .ok() - .map(|path| path.to_string_lossy().into_owned()) - }) - .collect::>(), - configuration: Some(server.configuration().clone()), + status: LanguageServerStatus { + name: server.name(), + pending_work: Default::default(), + has_pending_diagnostic_updates: false, + progress_tokens: Default::default(), + worktree: None, + binary: Some(LanguageServerBinaryInfo { + path: server.binary().path.to_string_lossy().into_owned(), + arguments: server + .binary() + .arguments + .iter() + .map(|arg| arg.to_string_lossy().into_owned()) + .collect(), + env: server.binary().env.clone(), + }), + configuration: Some(server.configuration().clone()), + workspace_folders: server.workspace_folders(), + }, } } } diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index e233ff1c5c121e301a85d829093cbdd37020fe07..d18262215198b8a1d7da38a4a325b6f1dcb82084 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -93,6 +93,7 @@ use rpc::{ proto::{LspRequestId, LspRequestMessage as _}, }; use 
serde::Serialize; +use serde_json::Value; use settings::{Settings, SettingsLocation, SettingsStore}; use sha2::{Digest, Sha256}; use smol::channel::Sender; @@ -3557,6 +3558,21 @@ fn notify_server_capabilities_updated(server: &LanguageServer, cx: &mut Context< message: proto::update_language_server::Variant::MetadataUpdated( proto::ServerMetadataUpdated { capabilities: Some(capabilities), + binary: Some(proto::LanguageServerBinaryInfo { + path: server.binary().path.to_string_lossy().into_owned(), + arguments: server + .binary() + .arguments + .iter() + .map(|arg| arg.to_string_lossy().into_owned()) + .collect(), + }), + configuration: serde_json::to_string(server.configuration()).ok(), + workspace_folders: server + .workspace_folders() + .iter() + .map(|uri| uri.to_string()) + .collect(), }, ), }); @@ -3713,13 +3729,23 @@ pub enum LspStoreEvent { }, } +#[derive(Clone, Debug, Serialize)] +pub struct LanguageServerBinaryInfo { + pub path: String, + pub arguments: Vec, + pub env: Option>, +} + #[derive(Clone, Debug, Serialize)] pub struct LanguageServerStatus { pub name: LanguageServerName, pub pending_work: BTreeMap, pub has_pending_diagnostic_updates: bool, - progress_tokens: HashSet, + pub progress_tokens: HashSet, pub worktree: Option, + pub binary: Option, + pub configuration: Option, + pub workspace_folders: BTreeSet, } #[derive(Clone, Debug)] @@ -8130,6 +8156,9 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree, + binary: None, + configuration: None, + workspace_folders: BTreeSet::new(), }, ) }) @@ -9139,6 +9168,9 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: server.worktree_id.map(WorktreeId::from_proto), + binary: None, + configuration: None, + workspace_folders: BTreeSet::new(), }, ); cx.emit(LspStoreEvent::LanguageServerAdded( @@ -11155,6 +11187,18 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), 
worktree: Some(key.worktree_id), + binary: Some(LanguageServerBinaryInfo { + path: language_server.binary().path.to_string_lossy().into_owned(), + arguments: language_server + .binary() + .arguments + .iter() + .map(|arg| arg.to_string_lossy().into_owned()) + .collect(), + env: language_server.binary().env.clone(), + }), + configuration: Some(language_server.configuration().clone()), + workspace_folders: language_server.workspace_folders(), }, ); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 3f325aba2b18efb4f36faef4e0a655f716a860bd..63041f8dfff3a432ed8e447ab0fcdb47f519e9e7 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -37,7 +37,7 @@ use dap::inline_value::{InlineValueLocation, VariableLookupKind, VariableScope}; use crate::{ git_store::GitStore, - lsp_store::{SymbolLocation, log_store::LogKind}, + lsp_store::{LanguageServerBinaryInfo, SymbolLocation, log_store::LogKind}, project_search::SearchResultsHandle, }; pub use agent_server_store::{AgentServerStore, AgentServersUpdated, ExternalAgentServerName}; @@ -114,7 +114,7 @@ use std::{ ops::{Not as _, Range}, path::{Path, PathBuf}, pin::pin, - str, + str::{self, FromStr}, sync::Arc, time::Duration, }; @@ -3111,17 +3111,42 @@ impl Project { match message { proto::update_language_server::Variant::MetadataUpdated(update) => { - if let Some(capabilities) = update - .capabilities - .as_ref() - .and_then(|capabilities| serde_json::from_str(capabilities).ok()) - { - self.lsp_store.update(cx, |lsp_store, _| { + self.lsp_store.update(cx, |lsp_store, _| { + if let Some(capabilities) = update + .capabilities + .as_ref() + .and_then(|capabilities| serde_json::from_str(capabilities).ok()) + { lsp_store .lsp_server_capabilities .insert(*language_server_id, capabilities); - }); - } + } + + if let Some(language_server_status) = lsp_store + .language_server_statuses + .get_mut(language_server_id) + { + if let Some(binary) = &update.binary { + 
language_server_status.binary = + Some(LanguageServerBinaryInfo { + path: binary.path.clone(), + arguments: binary.arguments.clone(), + env: None, + }); + } + + language_server_status.configuration = update + .configuration + .as_ref() + .and_then(|config_str| serde_json::from_str(config_str).ok()); + + language_server_status.workspace_folders = update + .workspace_folders + .iter() + .filter_map(|uri_str| lsp::Uri::from_str(uri_str).ok()) + .collect(); + } + }); } proto::update_language_server::Variant::RegisteredForBuffer(update) => { if let Some(buffer_id) = BufferId::new(update.buffer_id).ok() { diff --git a/crates/proto/proto/lsp.proto b/crates/proto/proto/lsp.proto index 3bdd46c4572acbc570c198288ba5c79b93aa4286..fa44528e2ed6009e6f18b6b5b9702b5228f10f05 100644 --- a/crates/proto/proto/lsp.proto +++ b/crates/proto/proto/lsp.proto @@ -615,8 +615,16 @@ message RegisteredForBuffer { uint64 buffer_id = 2; } +message LanguageServerBinaryInfo { + string path = 1; + repeated string arguments = 2; +} + message ServerMetadataUpdated { optional string capabilities = 1; + optional LanguageServerBinaryInfo binary = 2; + optional string configuration = 3; + repeated string workspace_folders = 4; } message LanguageServerLog { From 769464762a0233f0a60aa27ec899021588085e43 Mon Sep 17 00:00:00 2001 From: Lennart Date: Mon, 24 Nov 2025 22:31:20 +0100 Subject: [PATCH 005/749] vim: Fix cursor shape after deactivation (#42834) Update the `Vim.deactivate` method to ensure that the cursor shape is reset to the one available in the user's settings, in the `cursor_shape` setting, instead of simply defaulting to `CursorShape::Bar`. In order to test this behavior, the `Editor.cursor_shape` method was also introduced. 
Release Notes: - Fixed the cursor shape reset in vim mode deactivation, ensuring that the user's `cursor_shape` setting is used --------- Co-authored-by: dino --- crates/editor/src/editor.rs | 4 ++++ crates/vim/src/test.rs | 26 +++++++++++++++++++++++++- crates/vim/src/vim.rs | 7 ++++++- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 7a3c9b8594596152800442193e9364dc1a2c8aba..adb24900bf144b9cdedfb432e296a9a9e27a51c7 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -3025,6 +3025,10 @@ impl Editor { cx.notify(); } + pub fn cursor_shape(&self) -> CursorShape { + self.cursor_shape + } + pub fn set_current_line_highlight( &mut self, current_line_highlight: Option, diff --git a/crates/vim/src/test.rs b/crates/vim/src/test.rs index 5a98ec47b122e0d1ed7fd1edfc7c5e2265c40d90..5932a740945becae9d15025d358a52d5a4e279dd 100644 --- a/crates/vim/src/test.rs +++ b/crates/vim/src/test.rs @@ -16,7 +16,7 @@ use editor::{ use futures::StreamExt; use gpui::{KeyBinding, Modifiers, MouseButton, TestAppContext, px}; use itertools::Itertools; -use language::{Language, LanguageConfig, Point}; +use language::{CursorShape, Language, LanguageConfig, Point}; pub use neovim_backed_test_context::*; use settings::SettingsStore; use ui::Pixels; @@ -2404,3 +2404,27 @@ async fn test_repeat_grouping_41735(cx: &mut gpui::TestAppContext) { cx.simulate_shared_keystrokes("u").await; cx.shared_state().await.assert_eq("ˇaaa"); } + +#[gpui::test] +async fn test_deactivate(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + + cx.update_global(|store: &mut SettingsStore, cx| { + store.update_user_settings(cx, |settings| { + settings.editor.cursor_shape = Some(settings::CursorShape::Underline); + }); + }); + + // Assert that, while in `Normal` mode, the cursor shape is `Block` but, + // after deactivating vim mode, it should revert to the one specified in the + // user's 
settings, if set. + cx.update_editor(|editor, _window, _cx| { + assert_eq!(editor.cursor_shape(), CursorShape::Block); + }); + + cx.disable_vim(); + + cx.update_editor(|editor, _window, _cx| { + assert_eq!(editor.cursor_shape(), CursorShape::Underline); + }); +} diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index f87c562c8a0821f5dfea66dd33b1c44ca6021f42..1ffcf7e2224341affc7498032fd5a181e256943d 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -954,7 +954,12 @@ impl Vim { } fn deactivate(editor: &mut Editor, cx: &mut Context) { - editor.set_cursor_shape(CursorShape::Bar, cx); + editor.set_cursor_shape( + EditorSettings::get_global(cx) + .cursor_shape + .unwrap_or_default(), + cx, + ); editor.set_clip_at_line_ends(false, cx); editor.set_collapse_matches(false); editor.set_input_enabled(true); From 9e69ac889c7fac29a88f5e213826ce3be4a2895b Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Tue, 25 Nov 2025 03:15:12 +0530 Subject: [PATCH 006/749] editor: Fix copy file actions not working in remote environments (#43362) Closes #42500 Release Notes: - Fixed all three editor actions not working in remote environments - `editor: copy file name` - `editor: copy file location` - `editor: copy file name without extension` Here's the before/after: https://github.com/user-attachments/assets/bfb03e99-2e1a-47a2-bd26-280180154fe3 --- crates/collab/src/tests/editor_tests.rs | 287 +++++++++++++++++++++++- crates/editor/src/editor.rs | 27 ++- 2 files changed, 302 insertions(+), 12 deletions(-) diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index 33f07bfb388763875565bc9e37bda363f02600f0..fe20ab935c9fb2ffd2c18962953f9d62ca06fb16 100644 --- a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -7,8 +7,9 @@ use editor::{ DocumentColorsRenderMode, Editor, FETCH_COLORS_DEBOUNCE_TIMEOUT, MultiBufferOffset, RowInfo, SelectionEffects, actions::{ - ConfirmCodeAction, 
ConfirmCompletion, ConfirmRename, ContextMenuFirst, - ExpandMacroRecursively, MoveToEnd, Redo, Rename, SelectAll, ToggleCodeActions, Undo, + ConfirmCodeAction, ConfirmCompletion, ConfirmRename, ContextMenuFirst, CopyFileLocation, + CopyFileName, CopyFileNameWithoutExtension, ExpandMacroRecursively, MoveToEnd, Redo, + Rename, SelectAll, ToggleCodeActions, Undo, }, test::{ editor_test_context::{AssertionContextManager, EditorTestContext}, @@ -4269,6 +4270,288 @@ async fn test_client_can_query_lsp_ext(cx_a: &mut TestAppContext, cx_b: &mut Tes }); } +#[gpui::test] +async fn test_copy_file_name_without_extension( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, +) { + let mut server = TestServer::start(cx_a.executor()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + + cx_b.update(editor::init); + + client_a + .fs() + .insert_tree( + path!("/root"), + json!({ + "src": { + "main.rs": indoc! 
{" + fn main() { + println!(\"Hello, world!\"); + } + "}, + } + }), + ) + .await; + + let (project_a, worktree_id) = client_a.build_local_project(path!("/root"), cx_a).await; + let active_call_a = cx_a.read(ActiveCall::global); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.join_remote_project(project_id, cx_b).await; + + let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a); + let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b); + + let editor_a = workspace_a + .update_in(cx_a, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let editor_b = workspace_b + .update_in(cx_b, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + cx_a.run_until_parked(); + cx_b.run_until_parked(); + + editor_a.update_in(cx_a, |editor, window, cx| { + editor.copy_file_name_without_extension(&CopyFileNameWithoutExtension, window, cx); + }); + + assert_eq!( + cx_a.read_from_clipboard().and_then(|item| item.text()), + Some("main".to_string()) + ); + + editor_b.update_in(cx_b, |editor, window, cx| { + editor.copy_file_name_without_extension(&CopyFileNameWithoutExtension, window, cx); + }); + + assert_eq!( + cx_b.read_from_clipboard().and_then(|item| item.text()), + Some("main".to_string()) + ); +} + +#[gpui::test] +async fn test_copy_file_name(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let mut server = TestServer::start(cx_a.executor()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + + cx_b.update(editor::init); + + 
client_a + .fs() + .insert_tree( + path!("/root"), + json!({ + "src": { + "main.rs": indoc! {" + fn main() { + println!(\"Hello, world!\"); + } + "}, + } + }), + ) + .await; + + let (project_a, worktree_id) = client_a.build_local_project(path!("/root"), cx_a).await; + let active_call_a = cx_a.read(ActiveCall::global); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.join_remote_project(project_id, cx_b).await; + + let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a); + let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b); + + let editor_a = workspace_a + .update_in(cx_a, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let editor_b = workspace_b + .update_in(cx_b, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + cx_a.run_until_parked(); + cx_b.run_until_parked(); + + editor_a.update_in(cx_a, |editor, window, cx| { + editor.copy_file_name(&CopyFileName, window, cx); + }); + + assert_eq!( + cx_a.read_from_clipboard().and_then(|item| item.text()), + Some("main.rs".to_string()) + ); + + editor_b.update_in(cx_b, |editor, window, cx| { + editor.copy_file_name(&CopyFileName, window, cx); + }); + + assert_eq!( + cx_b.read_from_clipboard().and_then(|item| item.text()), + Some("main.rs".to_string()) + ); +} + +#[gpui::test] +async fn test_copy_file_location(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let mut server = TestServer::start(cx_a.executor()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + 
+ cx_b.update(editor::init); + + client_a + .fs() + .insert_tree( + path!("/root"), + json!({ + "src": { + "main.rs": indoc! {" + fn main() { + println!(\"Hello, world!\"); + } + "}, + } + }), + ) + .await; + + let (project_a, worktree_id) = client_a.build_local_project(path!("/root"), cx_a).await; + let active_call_a = cx_a.read(ActiveCall::global); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.join_remote_project(project_id, cx_b).await; + + let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a); + let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b); + + let editor_a = workspace_a + .update_in(cx_a, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let editor_b = workspace_b + .update_in(cx_b, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + cx_a.run_until_parked(); + cx_b.run_until_parked(); + + editor_a.update_in(cx_a, |editor, window, cx| { + editor.change_selections(Default::default(), window, cx, |s| { + s.select_ranges([MultiBufferOffset(16)..MultiBufferOffset(16)]); + }); + editor.copy_file_location(&CopyFileLocation, window, cx); + }); + + assert_eq!( + cx_a.read_from_clipboard().and_then(|item| item.text()), + Some(format!("{}:2", path!("src/main.rs"))) + ); + + editor_b.update_in(cx_b, |editor, window, cx| { + editor.change_selections(Default::default(), window, cx, |s| { + s.select_ranges([MultiBufferOffset(16)..MultiBufferOffset(16)]); + }); + editor.copy_file_location(&CopyFileLocation, window, cx); + }); + + assert_eq!( + cx_b.read_from_clipboard().and_then(|item| item.text()), + Some(format!("{}:2", path!("src/main.rs"))) + ); +} + 
#[track_caller] fn tab_undo_assert( cx_a: &mut EditorTestContext, diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index adb24900bf144b9cdedfb432e296a9a9e27a51c7..08627f1bd64be6e62581014628c57306df43623e 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -20234,18 +20234,20 @@ impl Editor { _: &mut Window, cx: &mut Context, ) { - if let Some(file) = self.target_file(cx) - && let Some(file_stem) = file.path().file_stem() - { + if let Some(file_stem) = self.active_excerpt(cx).and_then(|(_, buffer, _)| { + let file = buffer.read(cx).file()?; + file.path().file_stem() + }) { cx.write_to_clipboard(ClipboardItem::new_string(file_stem.to_string())); } } pub fn copy_file_name(&mut self, _: &CopyFileName, _: &mut Window, cx: &mut Context) { - if let Some(file) = self.target_file(cx) - && let Some(name) = file.path().file_name() - { - cx.write_to_clipboard(ClipboardItem::new_string(name.to_string())); + if let Some(file_name) = self.active_excerpt(cx).and_then(|(_, buffer, _)| { + let file = buffer.read(cx).file()?; + Some(file.file_name(cx)) + }) { + cx.write_to_clipboard(ClipboardItem::new_string(file_name.to_string())); } } @@ -20519,9 +20521,14 @@ impl Editor { .start .row + 1; - if let Some(file) = self.target_file(cx) { - let path = file.path().display(file.path_style(cx)); - cx.write_to_clipboard(ClipboardItem::new_string(format!("{path}:{selection}"))); + if let Some(file_location) = self.active_excerpt(cx).and_then(|(_, buffer, _)| { + let project = self.project()?.read(cx); + let file = buffer.read(cx).file()?; + let path = file.path().display(project.path_style(cx)); + + Some(format!("{path}:{selection}")) + }) { + cx.write_to_clipboard(ClipboardItem::new_string(file_location)); } } From f75e7582e68236629b44999b3031fee3b0d991f4 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Mon, 24 Nov 2025 13:46:13 -0800 Subject: [PATCH 007/749] Fix zed cli in NixOS WSL instances (#43433) This fixes running `zed ` inside nixos wsl 
instances. We're copying the approach used elsewhere which is to try using `--exec` first, and if that fails use an actual shell which should cover the nixos case because it only puts binaries on your PATH inside the `/etc/profile` script which is sourced on shell startup. Release Notes: - N/A --------- Co-authored-by: John Tur --- crates/cli/src/main.rs | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs index 7dd8a3253c9a0c8440d9342e5c0b3fd19e7f9828..7988f001dab37858d36f791fa8a184fe329c4be5 100644 --- a/crates/cli/src/main.rs +++ b/crates/cli/src/main.rs @@ -12,7 +12,9 @@ use clap::Parser; use cli::{CliRequest, CliResponse, IpcHandshake, ipc::IpcOneShotServer}; use parking_lot::Mutex; use std::{ - env, fs, io, + env, + ffi::OsStr, + fs, io, path::{Path, PathBuf}, process::ExitStatus, sync::Arc, @@ -300,7 +302,6 @@ mod tests { fn parse_path_in_wsl(source: &str, wsl: &str) -> Result { let mut source = PathWithPosition::parse_str(source); - let mut command = util::command::new_std_command("wsl.exe"); let (user, distro_name) = if let Some((user, distro)) = wsl.split_once('@') { if user.is_empty() { @@ -311,20 +312,34 @@ fn parse_path_in_wsl(source: &str, wsl: &str) -> Result { (None, wsl) }; + let mut args = vec!["--distribution", distro_name]; if let Some(user) = user { - command.arg("--user").arg(user); + args.push("--user"); + args.push(user); } - let output = command - .arg("--distribution") - .arg(distro_name) + let command = [ + OsStr::new("realpath"), + OsStr::new("-s"), + source.path.as_ref(), + ]; + + let output = util::command::new_std_command("wsl.exe") + .args(&args) .arg("--exec") - .arg("realpath") - .arg("-s") - .arg(&source.path) + .args(&command) .output()?; + let result = if output.status.success() { + String::from_utf8_lossy(&output.stdout).to_string() + } else { + let fallback = util::command::new_std_command("wsl.exe") + .args(&args) + .arg("--") + 
.args(&command) + .output()?; + String::from_utf8_lossy(&fallback.stdout).to_string() + }; - let result = String::from_utf8_lossy(&output.stdout); source.path = Path::new(result.trim()).to_owned(); Ok(source.to_string(|path| path.to_string_lossy().into_owned())) From e499f157dda2719fe0d19f921e0eafadaca7d4a7 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Mon, 24 Nov 2025 23:46:55 +0200 Subject: [PATCH 008/749] Keep single default PHP language server (#43432) https://github.com/zed-extensions/php/blob/9a119b18eeb247072964a19ce46fab54bbd1bb30/extension.toml provides 3 language servers for `php`, so `...` will always include all 3 if those are not excluded or included explicitly. Change the configs and docs so, that only one php language server is used. Release Notes: - N/A --- assets/settings/default.json | 4 ++-- docs/src/configuring-languages.md | 2 +- docs/src/languages/php.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index c8ffd31617df7d057e89329c2db70c6b6aa21e95..a222a16cb290eae905a69b492e9de8d3a1493592 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -1889,7 +1889,7 @@ } }, "PHP": { - "language_servers": ["phpactor", "!intelephense", "..."], + "language_servers": ["phpactor", "!intelephense", "!phptools", "..."], "prettier": { "allowed": true, "plugins": ["@prettier/plugin-php"], @@ -2138,7 +2138,7 @@ "windows": { "languages": { "PHP": { - "language_servers": ["intelephense", "!phpactor", "..."] + "language_servers": ["intelephense", "!phpactor", "!phptools", "..."] } } }, diff --git a/docs/src/configuring-languages.md b/docs/src/configuring-languages.md index 7b3456986e2766d134f3c1f15f94632feb067fb0..e478fab075acec67967a6c44cc5966e632aa1110 100644 --- a/docs/src/configuring-languages.md +++ b/docs/src/configuring-languages.md @@ -123,7 +123,7 @@ You can specify your preference using the `language_servers` setting: ```json [settings] 
"languages": { "PHP": { - "language_servers": ["intelephense", "!phpactor", "..."] + "language_servers": ["intelephense", "!phpactor", "!phptools", "..."] } } ``` diff --git a/docs/src/languages/php.md b/docs/src/languages/php.md index 1d7de27c5480421e2bc4d1f150a0b6d04a5ee49c..73d5ecbf37eae6ab9b7e710c132025d217fe57bd 100644 --- a/docs/src/languages/php.md +++ b/docs/src/languages/php.md @@ -35,7 +35,7 @@ To switch to `intelephense`, add the following to your `settings.json`: { "languages": { "PHP": { - "language_servers": ["intelephense", "!phpactor", "..."] + "language_servers": ["intelephense", "!phpactor", "!phptools", "..."] } } } From 8fd2e2164c9a181fff090792ecd088a486aacf26 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Mon, 24 Nov 2025 13:54:18 -0800 Subject: [PATCH 009/749] Fix remote project snippet duplication (#43429) Closes #43311 Release Notes: - N/A --------- Co-authored-by: John Tur --- crates/project/src/project.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 63041f8dfff3a432ed8e447ab0fcdb47f519e9e7..8875b3bb6facfb6ce268a38a54585497c8b198cd 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1244,9 +1244,7 @@ impl Project { let (tx, rx) = mpsc::unbounded(); cx.spawn(async move |this, cx| Self::send_buffer_ordered_messages(this, rx, cx).await) .detach(); - let global_snippets_dir = paths::snippets_dir().to_owned(); - let snippets = - SnippetProvider::new(fs.clone(), BTreeSet::from_iter([global_snippets_dir]), cx); + let snippets = SnippetProvider::new(fs.clone(), BTreeSet::from_iter([]), cx); let (remote_proto, path_style) = remote.read_with(cx, |remote, _| (remote.proto_client(), remote.path_style())); From 17d7988ad482b8aec7ea9ae2560f055e3a8ee26f Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 25 Nov 2025 00:05:16 +0200 Subject: [PATCH 010/749] Redact environment variables in server info view (#43436) Follow-up of 
https://github.com/zed-industries/zed/pull/42831 Release Notes: - N/A --- crates/language_tools/src/lsp_log_view.rs | 27 +++++------------------ crates/lsp/src/lsp.rs | 2 +- crates/project/src/lsp_store.rs | 20 ++--------------- crates/project/src/project.rs | 21 +++++++++++------- 4 files changed, 22 insertions(+), 48 deletions(-) diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index c7aa78067294cbb63266e55a0d05d7abbeefddc2..e7586583704750b0c84832ecb8cb9ba8d5a9b5a1 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -5,6 +5,7 @@ use gpui::{ App, Context, Corner, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, ParentElement, Render, Styled, Subscription, Task, WeakEntity, Window, actions, div, }; +use itertools::Itertools as _; use language::{LanguageServerId, language_settings::SoftWrap}; use lsp::{ LanguageServer, LanguageServerName, LanguageServerSelector, MessageType, SetTraceParams, @@ -12,10 +13,7 @@ use lsp::{ }; use project::{ LanguageServerStatus, Project, - lsp_store::{ - LanguageServerBinaryInfo, - log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, - }, + lsp_store::log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, search::SearchQuery, }; use proto::toggle_lsp_logs::LogType; @@ -351,12 +349,8 @@ impl LspLogView { .status .workspace_folders .iter() - .filter_map(|uri| { - uri.to_file_path() - .ok() - .map(|path| path.to_string_lossy().into_owned()) - }) - .collect::>() + .filter_map(|uri| uri.to_file_path().ok()) + .map(|path| path.to_string_lossy().into_owned()) .join(", "), CAPABILITIES = serde_json::to_string_pretty(&info.capabilities) .unwrap_or_else(|e| format!("Failed to serialize capabilities: {e}")), @@ -968,7 +962,7 @@ impl Render for LspLogToolbarItemView { for (server_id, name, worktree_root, active_entry_kind) in available_language_servers.iter() { - let label = format!("{} ({})", 
name, worktree_root); + let label = format!("{name} ({worktree_root})"); let server_id = *server_id; let active_entry_kind = *active_entry_kind; menu = menu.entry( @@ -1338,16 +1332,7 @@ impl ServerInfo { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: None, - binary: Some(LanguageServerBinaryInfo { - path: server.binary().path.to_string_lossy().into_owned(), - arguments: server - .binary() - .arguments - .iter() - .map(|arg| arg.to_string_lossy().into_owned()) - .collect(), - env: server.binary().env.clone(), - }), + binary: Some(server.binary().clone()), configuration: Some(server.configuration().clone()), workspace_folders: server.workspace_folders(), }, diff --git a/crates/lsp/src/lsp.rs b/crates/lsp/src/lsp.rs index 05771b8ce5db870a41228f81e4aac8222b11ad53..1bc635dcbeca2d38506640b86e547ce90ec76d3d 100644 --- a/crates/lsp/src/lsp.rs +++ b/crates/lsp/src/lsp.rs @@ -62,7 +62,7 @@ pub enum IoKind { /// Represents a launchable language server. This can either be a standalone binary or the path /// to a runtime with arguments to instruct it to launch the actual language server file. 
-#[derive(Clone)] +#[derive(Clone, Serialize)] pub struct LanguageServerBinary { pub path: PathBuf, pub arguments: Vec, diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index d18262215198b8a1d7da38a4a325b6f1dcb82084..349bfa9ed00223ea71d4d77dd32bdf433c39c784 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -3729,13 +3729,6 @@ pub enum LspStoreEvent { }, } -#[derive(Clone, Debug, Serialize)] -pub struct LanguageServerBinaryInfo { - pub path: String, - pub arguments: Vec, - pub env: Option>, -} - #[derive(Clone, Debug, Serialize)] pub struct LanguageServerStatus { pub name: LanguageServerName, @@ -3743,7 +3736,7 @@ pub struct LanguageServerStatus { pub has_pending_diagnostic_updates: bool, pub progress_tokens: HashSet, pub worktree: Option, - pub binary: Option, + pub binary: Option, pub configuration: Option, pub workspace_folders: BTreeSet, } @@ -11187,16 +11180,7 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: Some(key.worktree_id), - binary: Some(LanguageServerBinaryInfo { - path: language_server.binary().path.to_string_lossy().into_owned(), - arguments: language_server - .binary() - .arguments - .iter() - .map(|arg| arg.to_string_lossy().into_owned()) - .collect(), - env: language_server.binary().env.clone(), - }), + binary: Some(language_server.binary().clone()), configuration: Some(language_server.configuration().clone()), workspace_folders: language_server.workspace_folders(), }, diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 8875b3bb6facfb6ce268a38a54585497c8b198cd..149d30a5283c13f71477fc6776d5ca7f61f6205d 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -37,7 +37,7 @@ use dap::inline_value::{InlineValueLocation, VariableLookupKind, VariableScope}; use crate::{ git_store::GitStore, - lsp_store::{LanguageServerBinaryInfo, SymbolLocation, log_store::LogKind}, + 
lsp_store::{SymbolLocation, log_store::LogKind}, project_search::SearchResultsHandle, }; pub use agent_server_store::{AgentServerStore, AgentServersUpdated, ExternalAgentServerName}; @@ -87,7 +87,8 @@ use language::{ }; use lsp::{ CodeActionKind, CompletionContext, CompletionItemKind, DocumentHighlightKind, InsertTextMode, - LanguageServerId, LanguageServerName, LanguageServerSelector, MessageActionItem, + LanguageServerBinary, LanguageServerId, LanguageServerName, LanguageServerSelector, + MessageActionItem, }; use lsp_command::*; use lsp_store::{CompletionDocumentation, LspFormatTarget, OpenLspBufferHandle}; @@ -111,6 +112,7 @@ use snippet_provider::SnippetProvider; use std::{ borrow::Cow, collections::BTreeMap, + ffi::OsString, ops::{Not as _, Range}, path::{Path, PathBuf}, pin::pin, @@ -3125,12 +3127,15 @@ impl Project { .get_mut(language_server_id) { if let Some(binary) = &update.binary { - language_server_status.binary = - Some(LanguageServerBinaryInfo { - path: binary.path.clone(), - arguments: binary.arguments.clone(), - env: None, - }); + language_server_status.binary = Some(LanguageServerBinary { + path: PathBuf::from(&binary.path), + arguments: binary + .arguments + .iter() + .map(OsString::from) + .collect(), + env: None, + }); } language_server_status.configuration = update From 9122dd2d701a8628b4fa7ef7c82897205df2d908 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 24 Nov 2025 22:17:48 -0800 Subject: [PATCH 011/749] Combine zeta and zeta2 edit prediction providers (#43284) We've realized that a lot of the logic within an `EditPredictionProvider` is not specific to a particular edit prediction model / service. Rather, it is just the generic state management required to perform edit predictions at all in Zed. We want to move to a setup where there's one "built-in" edit prediction provider in Zed, which can be pointed at different edit prediction models. 
The only logic that is different for different models is how we construct the prompt, send the request, and parse the output. This PR also changes the behavior of the staff-only `zeta2` feature flag so that in only gates your *ability* to use Zeta2, but you can still use your local settings file to choose between different edit prediction models/services: zeta1, zeta2, and sweep. This PR also makes zeta1's outcome reporting and prediction-rating features work with all prediction models, not just zeta1. To do: * [x] remove duplicated logic around sending cloud requests between zeta1 and zeta2 * [x] port the outcome reporting logic from zeta to zeta2. * [x] get the "rate completions" modal working with all EP models * [x] display edit prediction diff * [x] show edit history events * [x] remove the original `zeta` crate. Release Notes: - N/A --------- Co-authored-by: Agus Zubiaga Co-authored-by: Ben Kunkle --- Cargo.lock | 65 +- Cargo.toml | 2 - assets/keymaps/default-macos.json | 16 +- .../cloud_llm_client/src/predict_edits_v3.rs | 39 +- .../src/cloud_zeta2_prompt.rs | 4 +- crates/edit_prediction_button/Cargo.toml | 1 - .../src/edit_prediction_button.rs | 28 +- crates/language/src/buffer.rs | 46 +- crates/language/src/syntax_map.rs | 7 + .../settings/src/settings_content/language.rs | 29 +- crates/zed/Cargo.toml | 1 - .../zed/src/zed/edit_prediction_registry.rs | 118 +- crates/zeta/Cargo.toml | 37 +- .../{zeta2 => zeta}/src/assemble_excerpts.rs | 0 crates/zeta/src/completion_diff_element.rs | 173 - crates/zeta/src/init.rs | 110 - crates/zeta/src/onboarding_modal.rs | 12 +- crates/zeta/src/onboarding_telemetry.rs | 9 - crates/{zeta2 => zeta}/src/prediction.rs | 44 +- crates/{zeta2 => zeta}/src/provider.rs | 14 +- ...tion_modal.rs => rate_prediction_modal.rs} | 426 +- .../{zeta2 => zeta}/src/retrieval_search.rs | 0 crates/{zeta2 => zeta}/src/sweep_ai.rs | 31 +- crates/{zeta2 => zeta}/src/udiff.rs | 0 crates/{zeta2 => zeta}/src/xml_edits.rs | 0 crates/zeta/src/zeta.rs | 
4925 ++++++++++------- crates/zeta/src/zeta1.rs | 500 ++ crates/zeta/src/{ => zeta1}/input_excerpt.rs | 6 +- crates/zeta/src/zeta_tests.rs | 671 +++ crates/zeta2/Cargo.toml | 61 - crates/zeta2/LICENSE-GPL | 1 - crates/zeta2/src/zeta2.rs | 2968 ---------- crates/zeta2_tools/Cargo.toml | 5 +- crates/zeta2_tools/src/zeta2_context_view.rs | 2 +- crates/zeta2_tools/src/zeta2_tools.rs | 576 +- crates/zeta_cli/Cargo.toml | 3 +- crates/zeta_cli/src/evaluate.rs | 2 +- crates/zeta_cli/src/example.rs | 4 +- crates/zeta_cli/src/main.rs | 13 +- crates/zeta_cli/src/predict.rs | 42 +- crates/zeta_cli/src/syntax_retrieval_stats.rs | 4 +- 41 files changed, 4900 insertions(+), 6095 deletions(-) rename crates/{zeta2 => zeta}/src/assemble_excerpts.rs (100%) delete mode 100644 crates/zeta/src/completion_diff_element.rs delete mode 100644 crates/zeta/src/init.rs delete mode 100644 crates/zeta/src/onboarding_telemetry.rs rename crates/{zeta2 => zeta}/src/prediction.rs (86%) rename crates/{zeta2 => zeta}/src/provider.rs (93%) rename crates/zeta/src/{rate_completion_modal.rs => rate_prediction_modal.rs} (60%) rename crates/{zeta2 => zeta}/src/retrieval_search.rs (100%) rename crates/{zeta2 => zeta}/src/sweep_ai.rs (77%) rename crates/{zeta2 => zeta}/src/udiff.rs (100%) rename crates/{zeta2 => zeta}/src/xml_edits.rs (100%) create mode 100644 crates/zeta/src/zeta1.rs rename crates/zeta/src/{ => zeta1}/input_excerpt.rs (98%) create mode 100644 crates/zeta/src/zeta_tests.rs delete mode 100644 crates/zeta2/Cargo.toml delete mode 120000 crates/zeta2/LICENSE-GPL delete mode 100644 crates/zeta2/src/zeta2.rs diff --git a/Cargo.lock b/Cargo.lock index 63734b552d7475eacdb2ee3eac66371f7c029d28..93961b4181aa1ad721ba8d740736d86c2ae32ca2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5309,7 +5309,6 @@ dependencies = [ "workspace", "zed_actions", "zeta", - "zeta2", ] [[package]] @@ -21316,7 +21315,6 @@ dependencies = [ "zed_actions", "zed_env_vars", "zeta", - "zeta2", "zeta2_tools", "zlog", "zlog_settings", 
@@ -21636,48 +21634,52 @@ dependencies = [ "ai_onboarding", "anyhow", "arrayvec", - "call", + "brotli", + "buffer_diff", "client", "clock", "cloud_api_types", "cloud_llm_client", + "cloud_zeta2_prompt", "collections", "command_palette_hooks", "copilot", "ctor", "db", "edit_prediction", + "edit_prediction_context", "editor", "feature_flags", "fs", "futures 0.3.31", "gpui", - "http_client", "indoc", "itertools 0.14.0", "language", "language_model", "log", + "lsp", + "markdown", "menu", + "open_ai", "parking_lot", "postage", + "pretty_assertions", "project", "rand 0.9.2", "regex", "release_channel", - "reqwest_client", - "rpc", "semver", "serde", "serde_json", "settings", + "smol", + "strsim", "strum 0.27.2", "telemetry", "telemetry_events", "theme", "thiserror 2.0.17", - "tree-sitter-go", - "tree-sitter-rust", "ui", "util", "uuid", @@ -21687,53 +21689,11 @@ dependencies = [ "zlog", ] -[[package]] -name = "zeta2" -version = "0.1.0" -dependencies = [ - "anyhow", - "arrayvec", - "brotli", - "chrono", - "client", - "clock", - "cloud_llm_client", - "cloud_zeta2_prompt", - "collections", - "edit_prediction", - "edit_prediction_context", - "feature_flags", - "futures 0.3.31", - "gpui", - "indoc", - "language", - "language_model", - "log", - "lsp", - "open_ai", - "pretty_assertions", - "project", - "release_channel", - "semver", - "serde", - "serde_json", - "settings", - "smol", - "strsim", - "thiserror 2.0.17", - "util", - "uuid", - "workspace", - "worktree", - "zlog", -] - [[package]] name = "zeta2_tools" version = "0.1.0" dependencies = [ "anyhow", - "chrono", "clap", "client", "cloud_llm_client", @@ -21746,9 +21706,7 @@ dependencies = [ "gpui", "indoc", "language", - "log", "multi_buffer", - "ordered-float 2.10.1", "pretty_assertions", "project", "serde", @@ -21760,7 +21718,7 @@ dependencies = [ "ui_input", "util", "workspace", - "zeta2", + "zeta", "zlog", ] @@ -21810,7 +21768,6 @@ dependencies = [ "util", "watch", "zeta", - "zeta2", "zlog", ] diff --git a/Cargo.toml 
b/Cargo.toml index e3ba2cb817357f5733179864bc23161d01aa1123..ab18418939e1b7100684e3c0acec277e7ec75a88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,6 @@ members = [ "crates/zed_actions", "crates/zed_env_vars", "crates/zeta", - "crates/zeta2", "crates/zeta_cli", "crates/zlog", "crates/zlog_settings", @@ -433,7 +432,6 @@ zed = { path = "crates/zed" } zed_actions = { path = "crates/zed_actions" } zed_env_vars = { path = "crates/zed_env_vars" } zeta = { path = "crates/zeta" } -zeta2 = { path = "crates/zeta2" } zlog = { path = "crates/zlog" } zlog_settings = { path = "crates/zlog_settings" } diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index 2f7c25a3560e09bccb9f45c64df38048eefdddd6..a298db28e63fd761f2f6d58827a7bcf5c8b39962 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -1218,23 +1218,23 @@ } }, { - "context": "RateCompletionModal", + "context": "RatePredictionsModal", "use_key_equivalents": true, "bindings": { - "cmd-shift-enter": "zeta::ThumbsUpActiveCompletion", - "cmd-shift-backspace": "zeta::ThumbsDownActiveCompletion", + "cmd-shift-enter": "zeta::ThumbsUpActivePrediction", + "cmd-shift-backspace": "zeta::ThumbsDownActivePrediction", "shift-down": "zeta::NextEdit", "shift-up": "zeta::PreviousEdit", - "right": "zeta::PreviewCompletion" + "right": "zeta::PreviewPrediction" } }, { - "context": "RateCompletionModal > Editor", + "context": "RatePredictionsModal > Editor", "use_key_equivalents": true, "bindings": { - "escape": "zeta::FocusCompletions", - "cmd-shift-enter": "zeta::ThumbsUpActiveCompletion", - "cmd-shift-backspace": "zeta::ThumbsDownActiveCompletion" + "escape": "zeta::FocusPredictions", + "cmd-shift-enter": "zeta::ThumbsUpActivePrediction", + "cmd-shift-backspace": "zeta::ThumbsDownActivePrediction" } }, { diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index 
32a5a34d9d3b63332008a9f7df84a1990f87f17c..47e5e71589c806f71725ee4f218ca4a86bee62d0 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use std::{ fmt::{Display, Write as _}, ops::{Add, Range, Sub}, - path::{Path, PathBuf}, + path::Path, sync::Arc, }; use strum::EnumIter; @@ -17,7 +17,7 @@ pub struct PlanContextRetrievalRequest { pub excerpt_path: Arc, pub excerpt_line_range: Range, pub cursor_file_max_row: Line, - pub events: Vec, + pub events: Vec>, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -36,7 +36,7 @@ pub struct PredictEditsRequest { pub signatures: Vec, #[serde(skip_serializing_if = "Vec::is_empty", default)] pub referenced_declarations: Vec, - pub events: Vec, + pub events: Vec>, #[serde(default)] pub can_collect_data: bool, #[serde(skip_serializing_if = "Vec::is_empty", default)] @@ -120,10 +120,11 @@ impl std::fmt::Display for PromptFormat { #[serde(tag = "event")] pub enum Event { BufferChange { - path: Option, - old_path: Option, + path: Arc, + old_path: Arc, diff: String, predicted: bool, + in_open_source_repo: bool, }, } @@ -135,23 +136,21 @@ impl Display for Event { old_path, diff, predicted, + .. 
} => { - let new_path = path.as_deref().unwrap_or(Path::new("untitled")); - let old_path = old_path.as_deref().unwrap_or(new_path); - if *predicted { write!( f, "// User accepted prediction:\n--- a/{}\n+++ b/{}\n{diff}", DiffPathFmt(old_path), - DiffPathFmt(new_path) + DiffPathFmt(path) ) } else { write!( f, "--- a/{}\n+++ b/{}\n{diff}", DiffPathFmt(old_path), - DiffPathFmt(new_path) + DiffPathFmt(path) ) } } @@ -300,10 +299,11 @@ mod tests { #[test] fn test_event_display() { let ev = Event::BufferChange { - path: None, - old_path: None, + path: Path::new("untitled").into(), + old_path: Path::new("untitled").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: false, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), @@ -317,10 +317,11 @@ mod tests { ); let ev = Event::BufferChange { - path: Some(PathBuf::from("foo/bar.txt")), - old_path: Some(PathBuf::from("foo/bar.txt")), + path: Path::new("foo/bar.txt").into(), + old_path: Path::new("foo/bar.txt").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: false, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), @@ -334,10 +335,11 @@ mod tests { ); let ev = Event::BufferChange { - path: Some(PathBuf::from("abc.txt")), - old_path: Some(PathBuf::from("123.txt")), + path: Path::new("abc.txt").into(), + old_path: Path::new("123.txt").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: false, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), @@ -351,10 +353,11 @@ mod tests { ); let ev = Event::BufferChange { - path: Some(PathBuf::from("abc.txt")), - old_path: Some(PathBuf::from("123.txt")), + path: Path::new("abc.txt").into(), + old_path: Path::new("123.txt").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: true, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), diff --git a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs index 
2ddabf750be763542bfc10b794afcb034ff08443..d67190c17556c5eb8b901e9baad73cc2691a9c78 100644 --- a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs @@ -432,7 +432,7 @@ pub fn write_excerpts<'a>( } } -pub fn push_events(output: &mut String, events: &[predict_edits_v3::Event]) { +pub fn push_events(output: &mut String, events: &[Arc]) { if events.is_empty() { return; }; @@ -910,7 +910,7 @@ fn declaration_size(declaration: &ReferencedDeclaration, style: DeclarationStyle } struct PromptData { - events: Vec, + events: Vec>, cursor_point: Point, cursor_path: Arc, // TODO: make a common struct with cursor_point included_files: Vec, diff --git a/crates/edit_prediction_button/Cargo.toml b/crates/edit_prediction_button/Cargo.toml index 9877b70161b3fdd16a0f667d85085520c9fe4f86..9062aca3c56f527385aecb000ebcd625f588eb9a 100644 --- a/crates/edit_prediction_button/Cargo.toml +++ b/crates/edit_prediction_button/Cargo.toml @@ -35,7 +35,6 @@ ui.workspace = true workspace.workspace = true zed_actions.workspace = true zeta.workspace = true -zeta2.workspace = true [dev-dependencies] copilot = { workspace = true, features = ["test-support"] } diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 051ca6e85ccb985ba6b325cda725f83029aa3193..254caa698aa05214f73a749e540233952db4978b 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -21,7 +21,9 @@ use language::{ use project::DisableAiSettings; use regex::Regex; use settings::{ - EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, Settings, SettingsStore, update_settings_file, + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, Settings, SettingsStore, + update_settings_file, }; use std::{ sync::{Arc, LazyLock}, @@ -38,7 +40,7 @@ use workspace::{ }; use 
zed_actions::OpenBrowser; use zeta::RateCompletions; -use zeta2::SweepFeatureFlag; +use zeta::{SweepFeatureFlag, Zeta2FeatureFlag}; actions!( edit_prediction, @@ -300,10 +302,7 @@ impl Render for EditPredictionButton { .with_handle(self.popover_menu_handle.clone()), ) } - provider @ (EditPredictionProvider::Experimental( - EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, - ) - | EditPredictionProvider::Zed) => { + provider @ (EditPredictionProvider::Experimental(_) | EditPredictionProvider::Zed) => { let enabled = self.editor_enabled.unwrap_or(true); let is_sweep = matches!( @@ -430,9 +429,7 @@ impl Render for EditPredictionButton { div().child(popover_menu.into_any_element()) } - EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => { - div().hidden() - } + EditPredictionProvider::None => div().hidden(), } } } @@ -497,6 +494,12 @@ impl EditPredictionButton { )); } + if cx.has_flag::() { + providers.push(EditPredictionProvider::Experimental( + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, + )); + } + providers } @@ -554,7 +557,7 @@ impl EditPredictionButton { EditPredictionProvider::Experimental( EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, ) => { - let has_api_token = zeta2::Zeta::try_global(cx) + let has_api_token = zeta::Zeta::try_global(cx) .map_or(false, |zeta| zeta.read(cx).has_sweep_api_token()); let entry = ContextMenuEntry::new("Sweep") @@ -571,6 +574,11 @@ impl EditPredictionButton { menu.item(entry) } + EditPredictionProvider::Experimental( + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, + ) => menu.entry("Zeta2", None, move |_, cx| { + set_completion_provider(fs.clone(), cx, provider); + }), EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => { continue; } diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index fd5e6fcaf6435a2836ab1ad828933a9d0763f5b9..c599a4751b60f150e31b7ddf6e32a6234a510c74 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ 
-13,6 +13,7 @@ use crate::{ }, task_context::RunnableRange, text_diff::text_diff, + unified_diff, }; pub use crate::{ Grammar, Language, LanguageRegistry, @@ -745,6 +746,33 @@ pub struct EditPreview { } impl EditPreview { + pub fn as_unified_diff(&self, edits: &[(Range, impl AsRef)]) -> Option { + let (first, _) = edits.first()?; + let (last, _) = edits.last()?; + + let start = first.start.to_point(&self.old_snapshot); + let old_end = last.end.to_point(&self.old_snapshot); + let new_end = last + .end + .bias_right(&self.old_snapshot) + .to_point(&self.applied_edits_snapshot); + + let start = Point::new(start.row.saturating_sub(3), 0); + let old_end = Point::new(old_end.row + 3, 0).min(self.old_snapshot.max_point()); + let new_end = Point::new(new_end.row + 3, 0).min(self.applied_edits_snapshot.max_point()); + + Some(unified_diff( + &self + .old_snapshot + .text_for_range(start..old_end) + .collect::(), + &self + .applied_edits_snapshot + .text_for_range(start..new_end) + .collect::(), + )) + } + pub fn highlight_edits( &self, current_snapshot: &BufferSnapshot, @@ -758,6 +786,8 @@ impl EditPreview { let mut highlighted_text = HighlightedTextBuilder::default(); + let visible_range_in_preview_snapshot = + visible_range_in_preview_snapshot.to_offset(&self.applied_edits_snapshot); let mut offset_in_preview_snapshot = visible_range_in_preview_snapshot.start; let insertion_highlight_style = HighlightStyle { @@ -825,7 +855,19 @@ impl EditPreview { highlighted_text.build() } - fn compute_visible_range(&self, edits: &[(Range, T)]) -> Option> { + pub fn build_result_buffer(&self, cx: &mut App) -> Entity { + cx.new(|cx| { + let mut buffer = Buffer::local_normalized( + self.applied_edits_snapshot.as_rope().clone(), + self.applied_edits_snapshot.line_ending(), + cx, + ); + buffer.set_language(self.syntax_snapshot.root_language(), cx); + buffer + }) + } + + pub fn compute_visible_range(&self, edits: &[(Range, T)]) -> Option> { let (first, _) = edits.first()?; let (last, _) = 
edits.last()?; @@ -842,7 +884,7 @@ impl EditPreview { let range = Point::new(start.row, 0) ..Point::new(end.row, self.applied_edits_snapshot.line_len(end.row)); - Some(range.to_offset(&self.applied_edits_snapshot)) + Some(range) } } diff --git a/crates/language/src/syntax_map.rs b/crates/language/src/syntax_map.rs index a9ac2faad9da9d5e07261ec826dda138921717a6..33a652b6fdeb32a2adbc1743cf8a70fe453518f5 100644 --- a/crates/language/src/syntax_map.rs +++ b/crates/language/src/syntax_map.rs @@ -279,6 +279,13 @@ impl SyntaxSnapshot { self.layers.is_empty() } + pub fn root_language(&self) -> Option> { + match &self.layers.first()?.content { + SyntaxLayerContent::Parsed { language, .. } => Some(language.clone()), + SyntaxLayerContent::Pending { .. } => None, + } + } + pub fn update_count(&self) -> usize { self.update_count } diff --git a/crates/settings/src/settings_content/language.rs b/crates/settings/src/settings_content/language.rs index 78ecc270166483b13af7e169b2390ad9f76d595d..166444c44b28133cfe20933c5b12acc42edb2399 100644 --- a/crates/settings/src/settings_content/language.rs +++ b/crates/settings/src/settings_content/language.rs @@ -78,6 +78,7 @@ pub enum EditPredictionProvider { } pub const EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME: &str = "sweep"; +pub const EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME: &str = "zeta2"; impl<'de> Deserialize<'de> for EditPredictionProvider { fn deserialize(deserializer: D) -> Result @@ -101,17 +102,25 @@ impl<'de> Deserialize<'de> for EditPredictionProvider { Content::Supermaven => EditPredictionProvider::Supermaven, Content::Zed => EditPredictionProvider::Zed, Content::Codestral => EditPredictionProvider::Codestral, + Content::Experimental(name) + if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME => + { + EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + ) + } + Content::Experimental(name) + if name == EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME => + { + 
EditPredictionProvider::Experimental( + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, + ) + } Content::Experimental(name) => { - if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME { - EditPredictionProvider::Experimental( - EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, - ) - } else { - return Err(D::Error::custom(format!( - "Unknown experimental edit prediction provider: {}", - name - ))); - } + return Err(D::Error::custom(format!( + "Unknown experimental edit prediction provider: {}", + name + ))); } }) } diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 68ba338102202f1803ab97746ec8372adb45a66a..470f1ea28a3663838080b7e7bf98f58215a0a8fc 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -161,7 +161,6 @@ workspace.workspace = true zed_actions.workspace = true zed_env_vars.workspace = true zeta.workspace = true -zeta2.workspace = true zlog.workspace = true zlog_settings.workspace = true chrono.workspace = true diff --git a/crates/zed/src/zed/edit_prediction_registry.rs b/crates/zed/src/zed/edit_prediction_registry.rs index 577e81c6a9b36bc29a4b1d1f0cda63170c75d5a2..f413fd94cb1a48adb213120364ed2f59c4cf58e0 100644 --- a/crates/zed/src/zed/edit_prediction_registry.rs +++ b/crates/zed/src/zed/edit_prediction_registry.rs @@ -7,13 +7,14 @@ use feature_flags::FeatureFlagAppExt; use gpui::{AnyWindowHandle, App, AppContext as _, Context, Entity, WeakEntity}; use language::language_settings::{EditPredictionProvider, all_language_settings}; use language_models::MistralLanguageModelProvider; -use settings::{EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, SettingsStore}; +use settings::{ + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, SettingsStore, +}; use std::{cell::RefCell, rc::Rc, sync::Arc}; use supermaven::{Supermaven, SupermavenCompletionProvider}; use ui::Window; -use zeta::ZetaEditPredictionProvider; -use zeta2::SweepFeatureFlag; -use zeta2::Zeta2FeatureFlag; +use 
zeta::{SweepFeatureFlag, Zeta2FeatureFlag, ZetaEditPredictionProvider}; pub fn init(client: Arc, user_store: Entity, cx: &mut App) { let editors: Rc, AnyWindowHandle>>> = Rc::default(); @@ -100,9 +101,7 @@ pub fn init(client: Arc, user_store: Entity, cx: &mut App) { } fn clear_zeta_edit_history(_: &zeta::ClearHistory, cx: &mut App) { - if let Some(zeta) = zeta::Zeta::global(cx) { - zeta.update(cx, |zeta, _| zeta.clear_history()); - } else if let Some(zeta) = zeta2::Zeta::try_global(cx) { + if let Some(zeta) = zeta::Zeta::try_global(cx) { zeta.update(cx, |zeta, _| zeta.clear_history()); } } @@ -204,86 +203,41 @@ fn assign_edit_prediction_provider( editor.set_edit_prediction_provider(Some(provider), window, cx); } value @ (EditPredictionProvider::Experimental(_) | EditPredictionProvider::Zed) => { - let zeta2 = zeta2::Zeta::global(client, &user_store, cx); - - if let Some(project) = editor.project() { - let mut worktree = None; - if let Some(buffer) = &singleton_buffer - && let Some(file) = buffer.read(cx).file() - { - let id = file.worktree_id(cx); - worktree = project.read(cx).worktree_for_id(id, cx); - } - - if let EditPredictionProvider::Experimental(name) = value - && name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME - && cx.has_flag::() - { - let provider = cx.new(|cx| { - zeta2::ZetaEditPredictionProvider::new( - project.clone(), - &client, - &user_store, - cx, - ) - }); - - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() - { - zeta2.update(cx, |zeta, cx| { - zeta.set_edit_prediction_model(zeta2::ZetaEditPredictionModel::Sweep); - zeta.register_buffer(buffer, project, cx); - }); - } - - editor.set_edit_prediction_provider(Some(provider), window, cx); - } else if user_store.read(cx).current_user().is_some() { - if cx.has_flag::() { - let zeta = zeta2::Zeta::global(client, &user_store, cx); - let provider = cx.new(|cx| { - zeta2::ZetaEditPredictionProvider::new( - project.clone(), - &client, - &user_store, - cx, - ) - }); - - 
// TODO [zeta2] handle multibuffers - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() + let zeta = zeta::Zeta::global(client, &user_store, cx); + + if let Some(project) = editor.project() + && let Some(buffer) = &singleton_buffer + && buffer.read(cx).file().is_some() + { + let has_model = zeta.update(cx, |zeta, cx| { + let model = if let EditPredictionProvider::Experimental(name) = value { + if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() + { + zeta::ZetaEditPredictionModel::Sweep + } else if name == EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() { - zeta.update(cx, |zeta, cx| { - zeta.set_edit_prediction_model( - zeta2::ZetaEditPredictionModel::ZedCloud, - ); - zeta.register_buffer(buffer, project, cx); - }); + zeta::ZetaEditPredictionModel::Zeta2 + } else { + return false; } - - editor.set_edit_prediction_provider(Some(provider), window, cx); + } else if user_store.read(cx).current_user().is_some() { + zeta::ZetaEditPredictionModel::Zeta1 } else { - let zeta = zeta::Zeta::register(worktree, client.clone(), user_store, cx); + return false; + }; - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() - { - zeta.update(cx, |zeta, cx| { - zeta.register_buffer(buffer, project, cx); - }); - } + zeta.set_edit_prediction_model(model); + zeta.register_buffer(buffer, project, cx); + true + }); - let provider = cx.new(|cx| { - zeta::ZetaEditPredictionProvider::new( - zeta, - project.clone(), - singleton_buffer, - cx, - ) - }); - editor.set_edit_prediction_provider(Some(provider), window, cx); - } + if has_model { + let provider = cx.new(|cx| { + ZetaEditPredictionProvider::new(project.clone(), &client, &user_store, cx) + }); + editor.set_edit_prediction_provider(Some(provider), window, cx); } } } diff --git a/crates/zeta/Cargo.toml b/crates/zeta/Cargo.toml index df569c7bc39655d99ee01b464a05e0ef3873f8d6..61eeab16229d82dc01d800f37bf729aa11469afd 100644 --- 
a/crates/zeta/Cargo.toml +++ b/crates/zeta/Cargo.toml @@ -4,81 +4,80 @@ version = "0.1.0" edition.workspace = true publish.workspace = true license = "GPL-3.0-or-later" -exclude = ["fixtures"] [lints] workspace = true [lib] path = "src/zeta.rs" -doctest = false [features] -test-support = [] +eval-support = [] [dependencies] ai_onboarding.workspace = true anyhow.workspace = true arrayvec.workspace = true +brotli.workspace = true +buffer_diff.workspace = true client.workspace = true cloud_llm_client.workspace = true +cloud_zeta2_prompt.workspace = true +copilot.workspace = true collections.workspace = true command_palette_hooks.workspace = true -copilot.workspace = true db.workspace = true edit_prediction.workspace = true +edit_prediction_context.workspace = true editor.workspace = true feature_flags.workspace = true fs.workspace = true futures.workspace = true gpui.workspace = true -http_client.workspace = true indoc.workspace = true itertools.workspace = true language.workspace = true language_model.workspace = true log.workspace = true +lsp.workspace = true +markdown.workspace = true menu.workspace = true +open_ai.workspace = true +pretty_assertions.workspace = true postage.workspace = true project.workspace = true rand.workspace = true -regex.workspace = true release_channel.workspace = true +regex.workspace = true semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true +smol.workspace = true +strsim.workspace = true strum.workspace = true telemetry.workspace = true telemetry_events.workspace = true theme.workspace = true thiserror.workspace = true -ui.workspace = true util.workspace = true +ui.workspace = true uuid.workspace = true workspace.workspace = true worktree.workspace = true zed_actions.workspace = true [dev-dependencies] -call = { workspace = true, features = ["test-support"] } -client = { workspace = true, features = ["test-support"] } clock = { workspace = true, features = ["test-support"] } 
cloud_api_types.workspace = true -collections = { workspace = true, features = ["test-support"] } +cloud_llm_client = { workspace = true, features = ["test-support"] } ctor.workspace = true -editor = { workspace = true, features = ["test-support"] } gpui = { workspace = true, features = ["test-support"] } -http_client = { workspace = true, features = ["test-support"] } indoc.workspace = true language = { workspace = true, features = ["test-support"] } +language_model = { workspace = true, features = ["test-support"] } +lsp.workspace = true parking_lot.workspace = true -reqwest_client = { workspace = true, features = ["test-support"] } -rpc = { workspace = true, features = ["test-support"] } +project = { workspace = true, features = ["test-support"] } settings = { workspace = true, features = ["test-support"] } -theme = { workspace = true, features = ["test-support"] } -tree-sitter-go.workspace = true -tree-sitter-rust.workspace = true -workspace = { workspace = true, features = ["test-support"] } -worktree = { workspace = true, features = ["test-support"] } zlog.workspace = true diff --git a/crates/zeta2/src/assemble_excerpts.rs b/crates/zeta/src/assemble_excerpts.rs similarity index 100% rename from crates/zeta2/src/assemble_excerpts.rs rename to crates/zeta/src/assemble_excerpts.rs diff --git a/crates/zeta/src/completion_diff_element.rs b/crates/zeta/src/completion_diff_element.rs deleted file mode 100644 index 73c3cb20cd7de5da92fbf6e5a32a8ca8d42a5933..0000000000000000000000000000000000000000 --- a/crates/zeta/src/completion_diff_element.rs +++ /dev/null @@ -1,173 +0,0 @@ -use std::cmp; - -use crate::EditPrediction; -use gpui::{ - AnyElement, App, BorderStyle, Bounds, Corners, Edges, HighlightStyle, Hsla, StyledText, - TextLayout, TextStyle, point, prelude::*, quad, size, -}; -use language::OffsetRangeExt; -use settings::Settings; -use theme::ThemeSettings; -use ui::prelude::*; - -pub struct CompletionDiffElement { - element: AnyElement, - text_layout: 
TextLayout, - cursor_offset: usize, -} - -impl CompletionDiffElement { - pub fn new(completion: &EditPrediction, cx: &App) -> Self { - let mut diff = completion - .snapshot - .text_for_range(completion.excerpt_range.clone()) - .collect::(); - - let mut cursor_offset_in_diff = None; - let mut delta = 0; - let mut diff_highlights = Vec::new(); - for (old_range, new_text) in completion.edits.iter() { - let old_range = old_range.to_offset(&completion.snapshot); - - if cursor_offset_in_diff.is_none() && completion.cursor_offset <= old_range.end { - cursor_offset_in_diff = - Some(completion.cursor_offset - completion.excerpt_range.start + delta); - } - - let old_start_in_diff = old_range.start - completion.excerpt_range.start + delta; - let old_end_in_diff = old_range.end - completion.excerpt_range.start + delta; - if old_start_in_diff < old_end_in_diff { - diff_highlights.push(( - old_start_in_diff..old_end_in_diff, - HighlightStyle { - background_color: Some(cx.theme().status().deleted_background), - strikethrough: Some(gpui::StrikethroughStyle { - thickness: px(1.), - color: Some(cx.theme().colors().text_muted), - }), - ..Default::default() - }, - )); - } - - if !new_text.is_empty() { - diff.insert_str(old_end_in_diff, new_text); - diff_highlights.push(( - old_end_in_diff..old_end_in_diff + new_text.len(), - HighlightStyle { - background_color: Some(cx.theme().status().created_background), - ..Default::default() - }, - )); - delta += new_text.len(); - } - } - - let cursor_offset_in_diff = cursor_offset_in_diff - .unwrap_or_else(|| completion.cursor_offset - completion.excerpt_range.start + delta); - - let settings = ThemeSettings::get_global(cx).clone(); - let text_style = TextStyle { - color: cx.theme().colors().editor_foreground, - font_size: settings.buffer_font_size(cx).into(), - font_family: settings.buffer_font.family, - font_features: settings.buffer_font.features, - font_fallbacks: settings.buffer_font.fallbacks, - line_height: 
relative(settings.buffer_line_height.value()), - font_weight: settings.buffer_font.weight, - font_style: settings.buffer_font.style, - ..Default::default() - }; - let element = StyledText::new(diff).with_default_highlights(&text_style, diff_highlights); - let text_layout = element.layout().clone(); - - CompletionDiffElement { - element: element.into_any_element(), - text_layout, - cursor_offset: cursor_offset_in_diff, - } - } -} - -impl IntoElement for CompletionDiffElement { - type Element = Self; - - fn into_element(self) -> Self { - self - } -} - -impl Element for CompletionDiffElement { - type RequestLayoutState = (); - type PrepaintState = (); - - fn id(&self) -> Option { - None - } - - fn source_location(&self) -> Option<&'static core::panic::Location<'static>> { - None - } - - fn request_layout( - &mut self, - _id: Option<&gpui::GlobalElementId>, - _inspector_id: Option<&gpui::InspectorElementId>, - window: &mut Window, - cx: &mut App, - ) -> (gpui::LayoutId, Self::RequestLayoutState) { - (self.element.request_layout(window, cx), ()) - } - - fn prepaint( - &mut self, - _id: Option<&gpui::GlobalElementId>, - _inspector_id: Option<&gpui::InspectorElementId>, - _bounds: gpui::Bounds, - _request_layout: &mut Self::RequestLayoutState, - window: &mut Window, - cx: &mut App, - ) -> Self::PrepaintState { - self.element.prepaint(window, cx); - } - - fn paint( - &mut self, - _id: Option<&gpui::GlobalElementId>, - _inspector_id: Option<&gpui::InspectorElementId>, - _bounds: gpui::Bounds, - _request_layout: &mut Self::RequestLayoutState, - _prepaint: &mut Self::PrepaintState, - window: &mut Window, - cx: &mut App, - ) { - if let Some(position) = self.text_layout.position_for_index(self.cursor_offset) { - let bounds = self.text_layout.bounds(); - let line_height = self.text_layout.line_height(); - let line_width = self - .text_layout - .line_layout_for_index(self.cursor_offset) - .map_or(bounds.size.width, |layout| layout.width()); - window.paint_quad(quad( - 
Bounds::new( - point(bounds.origin.x, position.y), - size(cmp::max(bounds.size.width, line_width), line_height), - ), - Corners::default(), - cx.theme().colors().editor_active_line_background, - Edges::default(), - Hsla::transparent_black(), - BorderStyle::default(), - )); - self.element.paint(window, cx); - window.paint_quad(quad( - Bounds::new(position, size(px(2.), line_height)), - Corners::default(), - cx.theme().players().local().cursor, - Edges::default(), - Hsla::transparent_black(), - BorderStyle::default(), - )); - } - } -} diff --git a/crates/zeta/src/init.rs b/crates/zeta/src/init.rs deleted file mode 100644 index 0167d878fa34976d7175a64269d9dfe29d18d8fe..0000000000000000000000000000000000000000 --- a/crates/zeta/src/init.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::any::{Any, TypeId}; - -use command_palette_hooks::CommandPaletteFilter; -use feature_flags::{FeatureFlagAppExt as _, PredictEditsRateCompletionsFeatureFlag}; -use gpui::actions; -use language::language_settings::EditPredictionProvider; -use project::DisableAiSettings; -use settings::{Settings, SettingsStore, update_settings_file}; -use ui::App; -use workspace::Workspace; - -use crate::{RateCompletionModal, onboarding_modal::ZedPredictModal}; - -actions!( - edit_prediction, - [ - /// Resets the edit prediction onboarding state. - ResetOnboarding, - /// Opens the rate completions modal. 
- RateCompletions - ] -); - -pub fn init(cx: &mut App) { - feature_gate_predict_edits_actions(cx); - - cx.observe_new(move |workspace: &mut Workspace, _, _cx| { - workspace.register_action(|workspace, _: &RateCompletions, window, cx| { - if cx.has_flag::() { - RateCompletionModal::toggle(workspace, window, cx); - } - }); - - workspace.register_action( - move |workspace, _: &zed_actions::OpenZedPredictOnboarding, window, cx| { - ZedPredictModal::toggle( - workspace, - workspace.user_store().clone(), - workspace.client().clone(), - window, - cx, - ) - }, - ); - - workspace.register_action(|workspace, _: &ResetOnboarding, _window, cx| { - update_settings_file(workspace.app_state().fs.clone(), cx, move |settings, _| { - settings - .project - .all_languages - .features - .get_or_insert_default() - .edit_prediction_provider = Some(EditPredictionProvider::None) - }); - }); - }) - .detach(); -} - -fn feature_gate_predict_edits_actions(cx: &mut App) { - let rate_completion_action_types = [TypeId::of::()]; - let reset_onboarding_action_types = [TypeId::of::()]; - let zeta_all_action_types = [ - TypeId::of::(), - TypeId::of::(), - zed_actions::OpenZedPredictOnboarding.type_id(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - ]; - - CommandPaletteFilter::update_global(cx, |filter, _cx| { - filter.hide_action_types(&rate_completion_action_types); - filter.hide_action_types(&reset_onboarding_action_types); - filter.hide_action_types(&[zed_actions::OpenZedPredictOnboarding.type_id()]); - }); - - cx.observe_global::(move |cx| { - let is_ai_disabled = DisableAiSettings::get_global(cx).disable_ai; - let has_feature_flag = cx.has_flag::(); - - CommandPaletteFilter::update_global(cx, |filter, _cx| { - if is_ai_disabled { - filter.hide_action_types(&zeta_all_action_types); - } else if has_feature_flag { - filter.show_action_types(&rate_completion_action_types); - } else { - filter.hide_action_types(&rate_completion_action_types); - } - }); - 
}) - .detach(); - - cx.observe_flag::(move |is_enabled, cx| { - if !DisableAiSettings::get_global(cx).disable_ai { - if is_enabled { - CommandPaletteFilter::update_global(cx, |filter, _cx| { - filter.show_action_types(&rate_completion_action_types); - }); - } else { - CommandPaletteFilter::update_global(cx, |filter, _cx| { - filter.hide_action_types(&rate_completion_action_types); - }); - } - } - }) - .detach(); -} diff --git a/crates/zeta/src/onboarding_modal.rs b/crates/zeta/src/onboarding_modal.rs index 94480add3053bece5017cf478e9f74065491639b..ed7adfc75476afb07f9c56b9c9c03abbbcef1134 100644 --- a/crates/zeta/src/onboarding_modal.rs +++ b/crates/zeta/src/onboarding_modal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{ZedPredictUpsell, onboarding_event}; +use crate::ZedPredictUpsell; use ai_onboarding::EditPredictionOnboarding; use client::{Client, UserStore}; use db::kvp::Dismissable; @@ -14,6 +14,16 @@ use settings::update_settings_file; use ui::{Vector, VectorName, prelude::*}; use workspace::{ModalView, Workspace}; +#[macro_export] +macro_rules! onboarding_event { + ($name:expr) => { + telemetry::event!($name, source = "Edit Prediction Onboarding"); + }; + ($name:expr, $($key:ident $(= $value:expr)?),+ $(,)?) => { + telemetry::event!($name, source = "Edit Prediction Onboarding", $($key $(= $value)?),+); + }; +} + /// Introduces user to Zed's Edit Prediction feature pub struct ZedPredictModal { onboarding: Entity, diff --git a/crates/zeta/src/onboarding_telemetry.rs b/crates/zeta/src/onboarding_telemetry.rs deleted file mode 100644 index 3c7d5e1442947c3e8cea446ebf37597a3cce1f80..0000000000000000000000000000000000000000 --- a/crates/zeta/src/onboarding_telemetry.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[macro_export] -macro_rules! onboarding_event { - ($name:expr) => { - telemetry::event!($name, source = "Edit Prediction Onboarding"); - }; - ($name:expr, $($key:ident $(= $value:expr)?),+ $(,)?) 
=> { - telemetry::event!($name, source = "Edit Prediction Onboarding", $($key $(= $value)?),+); - }; -} diff --git a/crates/zeta2/src/prediction.rs b/crates/zeta/src/prediction.rs similarity index 86% rename from crates/zeta2/src/prediction.rs rename to crates/zeta/src/prediction.rs index e9f726ce00c36b5235919c0e185876996f4fda03..0125e739f335fc133cbff84dcd8b4c4bac3e6e7b 100644 --- a/crates/zeta2/src/prediction.rs +++ b/crates/zeta/src/prediction.rs @@ -1,7 +1,13 @@ -use std::{ops::Range, sync::Arc}; +use std::{ + ops::Range, + path::Path, + sync::Arc, + time::{Duration, Instant}, +}; use gpui::{AsyncApp, Entity, SharedString}; use language::{Anchor, Buffer, BufferSnapshot, EditPreview, OffsetRangeExt, TextBufferSnapshot}; +use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] pub struct EditPredictionId(pub SharedString); @@ -26,6 +32,17 @@ pub struct EditPrediction { pub edit_preview: EditPreview, // We keep a reference to the buffer so that we do not need to reload it from disk when applying the prediction. 
pub buffer: Entity, + pub buffer_snapshotted_at: Instant, + pub response_received_at: Instant, + pub inputs: EditPredictionInputs, +} + +#[derive(Debug, Clone, Serialize)] +pub struct EditPredictionInputs { + pub events: Vec>, + pub included_files: Vec, + pub cursor_point: cloud_llm_client::predict_edits_v3::Point, + pub cursor_path: Arc, } impl EditPrediction { @@ -33,14 +50,17 @@ impl EditPrediction { id: EditPredictionId, edited_buffer: &Entity, edited_buffer_snapshot: &BufferSnapshot, - edits: Vec<(Range, Arc)>, + edits: Arc<[(Range, Arc)]>, + buffer_snapshotted_at: Instant, + response_received_at: Instant, + inputs: EditPredictionInputs, cx: &mut AsyncApp, ) -> Option { let (edits, snapshot, edit_preview_task) = edited_buffer .read_with(cx, |buffer, cx| { let new_snapshot = buffer.snapshot(); let edits: Arc<[_]> = - interpolate_edits(&edited_buffer_snapshot, &new_snapshot, edits.into())?.into(); + interpolate_edits(&edited_buffer_snapshot, &new_snapshot, edits)?.into(); Some((edits.clone(), new_snapshot, buffer.preview_edits(edits, cx))) }) @@ -53,7 +73,10 @@ impl EditPrediction { edits, snapshot, edit_preview, + inputs, buffer: edited_buffer.clone(), + buffer_snapshotted_at, + response_received_at, }) } @@ -67,6 +90,10 @@ impl EditPrediction { pub fn targets_buffer(&self, buffer: &Buffer) -> bool { self.snapshot.remote_id() == buffer.remote_id() } + + pub fn latency(&self) -> Duration { + self.response_received_at - self.buffer_snapshotted_at + } } impl std::fmt::Debug for EditPrediction { @@ -147,6 +174,17 @@ mod tests { snapshot: cx.read(|cx| buffer.read(cx).snapshot()), buffer: buffer.clone(), edit_preview, + inputs: EditPredictionInputs { + events: vec![], + included_files: vec![], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + line: cloud_llm_client::predict_edits_v3::Line(0), + column: 0, + }, + cursor_path: Path::new("path.txt").into(), + }, + buffer_snapshotted_at: Instant::now(), + response_received_at: Instant::now(), }; 
cx.update(|cx| { diff --git a/crates/zeta2/src/provider.rs b/crates/zeta/src/provider.rs similarity index 93% rename from crates/zeta2/src/provider.rs rename to crates/zeta/src/provider.rs index 768af6253fe1a2aa60ef9cb0a10fcee0035dc3e2..a2b3eed1b5efe953ebdf5a2448ca06e7866bea86 100644 --- a/crates/zeta2/src/provider.rs +++ b/crates/zeta/src/provider.rs @@ -131,8 +131,14 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { } fn discard(&mut self, cx: &mut Context) { - self.zeta.update(cx, |zeta, _cx| { - zeta.discard_current_prediction(&self.project); + self.zeta.update(cx, |zeta, cx| { + zeta.discard_current_prediction(&self.project, cx); + }); + } + + fn did_show(&mut self, cx: &mut Context) { + self.zeta.update(cx, |zeta, cx| { + zeta.did_show_current_prediction(&self.project, cx); }); } @@ -162,8 +168,8 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { let snapshot = buffer.snapshot(); let Some(edits) = prediction.interpolate(&snapshot) else { - self.zeta.update(cx, |zeta, _cx| { - zeta.discard_current_prediction(&self.project); + self.zeta.update(cx, |zeta, cx| { + zeta.discard_current_prediction(&self.project, cx); }); return None; }; diff --git a/crates/zeta/src/rate_completion_modal.rs b/crates/zeta/src/rate_prediction_modal.rs similarity index 60% rename from crates/zeta/src/rate_completion_modal.rs rename to crates/zeta/src/rate_prediction_modal.rs index a081538f5528946ea5b959981b7bd70d44b8b11b..0cceb86608ed609122c81d406c71280894789e88 100644 --- a/crates/zeta/src/rate_completion_modal.rs +++ b/crates/zeta/src/rate_prediction_modal.rs @@ -1,8 +1,18 @@ -use crate::{CompletionDiffElement, EditPrediction, EditPredictionRating, Zeta}; -use editor::Editor; -use gpui::{App, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, actions, prelude::*}; -use language::language_settings; +use crate::{EditPrediction, EditPredictionRating, Zeta}; +use buffer_diff::{BufferDiff, BufferDiffSnapshot}; +use cloud_zeta2_prompt::write_codeblock; 
+use editor::{Editor, ExcerptRange, MultiBuffer}; +use gpui::{ + App, BorderStyle, DismissEvent, EdgesRefinement, Entity, EventEmitter, FocusHandle, Focusable, + Length, StyleRefinement, TextStyleRefinement, Window, actions, prelude::*, +}; +use language::{LanguageRegistry, Point, language_settings}; +use markdown::{Markdown, MarkdownStyle}; +use settings::Settings as _; +use std::fmt::Write; +use std::sync::Arc; use std::time::Duration; +use theme::ThemeSettings; use ui::{KeyBinding, List, ListItem, ListItemSpacing, Tooltip, prelude::*}; use workspace::{ModalView, Workspace}; @@ -10,41 +20,44 @@ actions!( zeta, [ /// Rates the active completion with a thumbs up. - ThumbsUpActiveCompletion, + ThumbsUpActivePrediction, /// Rates the active completion with a thumbs down. - ThumbsDownActiveCompletion, + ThumbsDownActivePrediction, /// Navigates to the next edit in the completion history. NextEdit, /// Navigates to the previous edit in the completion history. PreviousEdit, /// Focuses on the completions list. - FocusCompletions, + FocusPredictions, /// Previews the selected completion. 
- PreviewCompletion, + PreviewPrediction, ] ); -pub struct RateCompletionModal { +pub struct RatePredictionsModal { zeta: Entity, - active_completion: Option, + language_registry: Arc, + active_prediction: Option, selected_index: usize, + diff_editor: Entity, focus_handle: FocusHandle, _subscription: gpui::Subscription, - current_view: RateCompletionView, + current_view: RatePredictionView, } -struct ActiveCompletion { - completion: EditPrediction, +struct ActivePrediction { + prediction: EditPrediction, feedback_editor: Entity, + formatted_inputs: Entity, } #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] -enum RateCompletionView { +enum RatePredictionView { SuggestedEdits, RawInput, } -impl RateCompletionView { +impl RatePredictionView { pub fn name(&self) -> &'static str { match self { Self::SuggestedEdits => "Suggested Edits", @@ -53,25 +66,42 @@ impl RateCompletionView { } } -impl RateCompletionModal { +impl RatePredictionsModal { pub fn toggle(workspace: &mut Workspace, window: &mut Window, cx: &mut Context) { - if let Some(zeta) = Zeta::global(cx) { - workspace.toggle_modal(window, cx, |_window, cx| RateCompletionModal::new(zeta, cx)); + if let Some(zeta) = Zeta::try_global(cx) { + let language_registry = workspace.app_state().languages.clone(); + workspace.toggle_modal(window, cx, |window, cx| { + RatePredictionsModal::new(zeta, language_registry, window, cx) + }); - telemetry::event!("Rate Completion Modal Open", source = "Edit Prediction"); + telemetry::event!("Rate Prediction Modal Open", source = "Edit Prediction"); } } - pub fn new(zeta: Entity, cx: &mut Context) -> Self { + pub fn new( + zeta: Entity, + language_registry: Arc, + window: &mut Window, + cx: &mut Context, + ) -> Self { let subscription = cx.observe(&zeta, |_, _, cx| cx.notify()); Self { zeta, + language_registry, selected_index: 0, focus_handle: cx.focus_handle(), - active_completion: None, + active_prediction: None, _subscription: subscription, - current_view: 
RateCompletionView::SuggestedEdits, + diff_editor: cx.new(|cx| { + let multibuffer = cx.new(|_| MultiBuffer::new(language::Capability::ReadOnly)); + let mut editor = Editor::for_multibuffer(multibuffer, None, window, cx); + editor.disable_inline_diagnostics(); + editor.set_expand_all_diff_hunks(cx); + editor.set_show_git_diff_gutter(false, cx); + editor + }), + current_view: RatePredictionView::SuggestedEdits, } } @@ -83,7 +113,7 @@ impl RateCompletionModal { self.selected_index += 1; self.selected_index = usize::min( self.selected_index, - self.zeta.read(cx).shown_completions().count(), + self.zeta.read(cx).shown_predictions().count(), ); cx.notify(); } @@ -102,7 +132,7 @@ impl RateCompletionModal { let next_index = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .skip(self.selected_index) .enumerate() .skip(1) // Skip straight to the next item @@ -122,7 +152,7 @@ impl RateCompletionModal { let prev_index = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .rev() .skip((completions_len - 1) - self.selected_index) .enumerate() @@ -149,14 +179,14 @@ impl RateCompletionModal { pub fn thumbs_up_active( &mut self, - _: &ThumbsUpActiveCompletion, + _: &ThumbsUpActivePrediction, window: &mut Window, cx: &mut Context, ) { self.zeta.update(cx, |zeta, cx| { - if let Some(active) = &self.active_completion { - zeta.rate_completion( - &active.completion, + if let Some(active) = &self.active_prediction { + zeta.rate_prediction( + &active.prediction, EditPredictionRating::Positive, active.feedback_editor.read(cx).text(cx), cx, @@ -165,9 +195,9 @@ impl RateCompletionModal { }); let current_completion = self - .active_completion + .active_prediction .as_ref() - .map(|completion| completion.completion.clone()); + .map(|completion| completion.prediction.clone()); self.select_completion(current_completion, false, window, cx); self.select_next_edit(&Default::default(), window, cx); self.confirm(&Default::default(), window, cx); @@ -177,18 +207,18 @@ 
impl RateCompletionModal { pub fn thumbs_down_active( &mut self, - _: &ThumbsDownActiveCompletion, + _: &ThumbsDownActivePrediction, window: &mut Window, cx: &mut Context, ) { - if let Some(active) = &self.active_completion { + if let Some(active) = &self.active_prediction { if active.feedback_editor.read(cx).text(cx).is_empty() { return; } self.zeta.update(cx, |zeta, cx| { - zeta.rate_completion( - &active.completion, + zeta.rate_prediction( + &active.prediction, EditPredictionRating::Negative, active.feedback_editor.read(cx).text(cx), cx, @@ -197,9 +227,9 @@ impl RateCompletionModal { } let current_completion = self - .active_completion + .active_prediction .as_ref() - .map(|completion| completion.completion.clone()); + .map(|completion| completion.prediction.clone()); self.select_completion(current_completion, false, window, cx); self.select_next_edit(&Default::default(), window, cx); self.confirm(&Default::default(), window, cx); @@ -209,7 +239,7 @@ impl RateCompletionModal { fn focus_completions( &mut self, - _: &FocusCompletions, + _: &FocusPredictions, window: &mut Window, cx: &mut Context, ) { @@ -219,14 +249,14 @@ impl RateCompletionModal { fn preview_completion( &mut self, - _: &PreviewCompletion, + _: &PreviewPrediction, window: &mut Window, cx: &mut Context, ) { let completion = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .skip(self.selected_index) .take(1) .next() @@ -239,7 +269,7 @@ impl RateCompletionModal { let completion = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .skip(self.selected_index) .take(1) .next() @@ -250,54 +280,145 @@ impl RateCompletionModal { pub fn select_completion( &mut self, - completion: Option, + prediction: Option, focus: bool, window: &mut Window, cx: &mut Context, ) { // Avoid resetting completion rating if it's already selected. 
- if let Some(completion) = completion.as_ref() { + if let Some(prediction) = prediction { self.selected_index = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .enumerate() - .find(|(_, completion_b)| completion.id == completion_b.id) + .find(|(_, completion_b)| prediction.id == completion_b.id) .map(|(ix, _)| ix) .unwrap_or(self.selected_index); cx.notify(); - if let Some(prev_completion) = self.active_completion.as_ref() - && completion.id == prev_completion.completion.id + if let Some(prev_prediction) = self.active_prediction.as_ref() + && prediction.id == prev_prediction.prediction.id { if focus { - window.focus(&prev_completion.feedback_editor.focus_handle(cx)); + window.focus(&prev_prediction.feedback_editor.focus_handle(cx)); } return; } + + self.diff_editor.update(cx, |editor, cx| { + let new_buffer = prediction.edit_preview.build_result_buffer(cx); + let new_buffer_snapshot = new_buffer.read(cx).snapshot(); + let old_buffer_snapshot = prediction.snapshot.clone(); + let new_buffer_id = new_buffer_snapshot.remote_id(); + + let range = prediction + .edit_preview + .compute_visible_range(&prediction.edits) + .unwrap_or(Point::zero()..Point::zero()); + let start = Point::new(range.start.row.saturating_sub(5), 0); + let end = Point::new(range.end.row + 5, 0).min(new_buffer_snapshot.max_point()); + + let diff = cx.new::(|cx| { + let diff_snapshot = BufferDiffSnapshot::new_with_base_buffer( + new_buffer_snapshot.text.clone(), + Some(old_buffer_snapshot.text().into()), + old_buffer_snapshot.clone(), + cx, + ); + let diff = BufferDiff::new(&new_buffer_snapshot, cx); + cx.spawn(async move |diff, cx| { + let diff_snapshot = diff_snapshot.await; + diff.update(cx, |diff, cx| { + diff.set_snapshot(diff_snapshot, &new_buffer_snapshot.text, cx); + }) + }) + .detach(); + diff + }); + + editor.disable_header_for_buffer(new_buffer_id, cx); + editor.buffer().update(cx, |multibuffer, cx| { + multibuffer.clear(cx); + multibuffer.push_excerpts( + new_buffer, + 
vec![ExcerptRange { + context: start..end, + primary: start..end, + }], + cx, + ); + multibuffer.add_diff(diff, cx); + }); + }); + + let mut formatted_inputs = String::new(); + + write!(&mut formatted_inputs, "## Events\n\n").unwrap(); + + for event in &prediction.inputs.events { + write!(&mut formatted_inputs, "```diff\n{event}```\n\n").unwrap(); + } + + write!(&mut formatted_inputs, "## Included files\n\n").unwrap(); + + for included_file in &prediction.inputs.included_files { + let cursor_insertions = &[(prediction.inputs.cursor_point, "<|CURSOR|>")]; + + write!( + &mut formatted_inputs, + "### {}\n\n", + included_file.path.display() + ) + .unwrap(); + + write_codeblock( + &included_file.path, + &included_file.excerpts, + if included_file.path == prediction.inputs.cursor_path { + cursor_insertions + } else { + &[] + }, + included_file.max_row, + false, + &mut formatted_inputs, + ); + } + + self.active_prediction = Some(ActivePrediction { + prediction, + feedback_editor: cx.new(|cx| { + let mut editor = Editor::multi_line(window, cx); + editor.disable_scrollbars_and_minimap(window, cx); + editor.set_soft_wrap_mode(language_settings::SoftWrap::EditorWidth, cx); + editor.set_show_line_numbers(false, cx); + editor.set_show_git_diff_gutter(false, cx); + editor.set_show_code_actions(false, cx); + editor.set_show_runnables(false, cx); + editor.set_show_breakpoints(false, cx); + editor.set_show_wrap_guides(false, cx); + editor.set_show_indent_guides(false, cx); + editor.set_show_edit_predictions(Some(false), window, cx); + editor.set_placeholder_text("Add your feedback…", window, cx); + if focus { + cx.focus_self(window); + } + editor + }), + formatted_inputs: cx.new(|cx| { + Markdown::new( + formatted_inputs.into(), + Some(self.language_registry.clone()), + None, + cx, + ) + }), + }); + } else { + self.active_prediction = None; } - self.active_completion = completion.map(|completion| ActiveCompletion { - completion, - feedback_editor: cx.new(|cx| { - let mut editor = 
Editor::multi_line(window, cx); - editor.disable_scrollbars_and_minimap(window, cx); - editor.set_soft_wrap_mode(language_settings::SoftWrap::EditorWidth, cx); - editor.set_show_line_numbers(false, cx); - editor.set_show_git_diff_gutter(false, cx); - editor.set_show_code_actions(false, cx); - editor.set_show_runnables(false, cx); - editor.set_show_breakpoints(false, cx); - editor.set_show_wrap_guides(false, cx); - editor.set_show_indent_guides(false, cx); - editor.set_show_edit_predictions(Some(false), window, cx); - editor.set_placeholder_text("Add your feedback…", window, cx); - if focus { - cx.focus_self(window); - } - editor - }), - }); cx.notify(); } @@ -312,33 +433,31 @@ impl RateCompletionModal { .child( Button::new( ElementId::Name("suggested-edits".into()), - RateCompletionView::SuggestedEdits.name(), + RatePredictionView::SuggestedEdits.name(), ) .label_size(LabelSize::Small) .on_click(cx.listener(move |this, _, _window, cx| { - this.current_view = RateCompletionView::SuggestedEdits; + this.current_view = RatePredictionView::SuggestedEdits; cx.notify(); })) - .toggle_state(self.current_view == RateCompletionView::SuggestedEdits), + .toggle_state(self.current_view == RatePredictionView::SuggestedEdits), ) .child( Button::new( ElementId::Name("raw-input".into()), - RateCompletionView::RawInput.name(), + RatePredictionView::RawInput.name(), ) .label_size(LabelSize::Small) .on_click(cx.listener(move |this, _, _window, cx| { - this.current_view = RateCompletionView::RawInput; + this.current_view = RatePredictionView::RawInput; cx.notify(); })) - .toggle_state(self.current_view == RateCompletionView::RawInput), + .toggle_state(self.current_view == RatePredictionView::RawInput), ) } fn render_suggested_edits(&self, cx: &mut Context) -> Option> { - let active_completion = self.active_completion.as_ref()?; let bg_color = cx.theme().colors().editor_background; - Some( div() .id("diff") @@ -347,14 +466,18 @@ impl RateCompletionModal { .bg(bg_color) 
.overflow_scroll() .whitespace_nowrap() - .child(CompletionDiffElement::new( - &active_completion.completion, - cx, - )), + .child(self.diff_editor.clone()), ) } - fn render_raw_input(&self, cx: &mut Context) -> Option> { + fn render_raw_input( + &self, + window: &mut Window, + cx: &mut Context, + ) -> Option> { + let theme_settings = ThemeSettings::get_global(cx); + let buffer_font_size = theme_settings.buffer_font_size(cx); + Some( v_flex() .size_full() @@ -368,30 +491,81 @@ impl RateCompletionModal { .size_full() .bg(cx.theme().colors().editor_background) .overflow_scroll() - .child(if let Some(active_completion) = &self.active_completion { - format!( - "{}\n{}", - active_completion.completion.input_events, - active_completion.completion.input_excerpt + .child(if let Some(active_prediction) = &self.active_prediction { + markdown::MarkdownElement::new( + active_prediction.formatted_inputs.clone(), + MarkdownStyle { + base_text_style: window.text_style(), + syntax: cx.theme().syntax().clone(), + code_block: StyleRefinement { + text: Some(TextStyleRefinement { + font_family: Some( + theme_settings.buffer_font.family.clone(), + ), + font_size: Some(buffer_font_size.into()), + ..Default::default() + }), + padding: EdgesRefinement { + top: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + left: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + right: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + bottom: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + }, + margin: EdgesRefinement { + top: Some(Length::Definite(px(8.).into())), + left: Some(Length::Definite(px(0.).into())), + right: Some(Length::Definite(px(0.).into())), + bottom: Some(Length::Definite(px(12.).into())), + }, + border_style: Some(BorderStyle::Solid), + border_widths: EdgesRefinement { + top: Some(AbsoluteLength::Pixels(px(1.))), + left: Some(AbsoluteLength::Pixels(px(1.))), + right: 
Some(AbsoluteLength::Pixels(px(1.))), + bottom: Some(AbsoluteLength::Pixels(px(1.))), + }, + border_color: Some(cx.theme().colors().border_variant), + background: Some( + cx.theme().colors().editor_background.into(), + ), + ..Default::default() + }, + ..Default::default() + }, ) + .into_any_element() } else { - "No active completion".to_string() + div() + .child("No active completion".to_string()) + .into_any_element() }), ) .id("raw-input-view"), ) } - fn render_active_completion(&mut self, cx: &mut Context) -> Option { - let active_completion = self.active_completion.as_ref()?; - let completion_id = active_completion.completion.id; + fn render_active_completion( + &mut self, + window: &mut Window, + cx: &mut Context, + ) -> Option { + let active_prediction = self.active_prediction.as_ref()?; + let completion_id = active_prediction.prediction.id.clone(); let focus_handle = &self.focus_handle(cx); let border_color = cx.theme().colors().border; let bg_color = cx.theme().colors().editor_background; - let rated = self.zeta.read(cx).is_completion_rated(completion_id); - let feedback_empty = active_completion + let rated = self.zeta.read(cx).is_prediction_rated(&completion_id); + let feedback_empty = active_prediction .feedback_editor .read(cx) .text(cx) @@ -412,10 +586,10 @@ impl RateCompletionModal { .child(self.render_view_nav(cx)) .when_some( match self.current_view { - RateCompletionView::SuggestedEdits => { + RatePredictionView::SuggestedEdits => { self.render_suggested_edits(cx) } - RateCompletionView::RawInput => self.render_raw_input(cx), + RatePredictionView::RawInput => self.render_raw_input(window, cx), }, |this, element| this.child(element), ), @@ -450,7 +624,7 @@ impl RateCompletionModal { .h_40() .pt_1() .bg(bg_color) - .child(active_completion.feedback_editor.clone()), + .child(active_prediction.feedback_editor.clone()), ) }) .child( @@ -472,7 +646,7 @@ impl RateCompletionModal { ) .child(Label::new("Rated completion.").color(Color::Muted)), ) - } else 
if active_completion.completion.edits.is_empty() { + } else if active_prediction.prediction.edits.is_empty() { Some( label_container .child( @@ -489,7 +663,7 @@ impl RateCompletionModal { h_flex() .gap_1() .child( - Button::new("bad", "Bad Completion") + Button::new("bad", "Bad Prediction") .icon(IconName::ThumbsDown) .icon_size(IconSize::Small) .icon_position(IconPosition::Start) @@ -500,14 +674,14 @@ impl RateCompletionModal { )) }) .key_binding(KeyBinding::for_action_in( - &ThumbsDownActiveCompletion, + &ThumbsDownActivePrediction, focus_handle, cx, )) .on_click(cx.listener(move |this, _, window, cx| { - if this.active_completion.is_some() { + if this.active_prediction.is_some() { this.thumbs_down_active( - &ThumbsDownActiveCompletion, + &ThumbsDownActivePrediction, window, cx, ); @@ -515,20 +689,20 @@ impl RateCompletionModal { })), ) .child( - Button::new("good", "Good Completion") + Button::new("good", "Good Prediction") .icon(IconName::ThumbsUp) .icon_size(IconSize::Small) .icon_position(IconPosition::Start) .disabled(rated) .key_binding(KeyBinding::for_action_in( - &ThumbsUpActiveCompletion, + &ThumbsUpActivePrediction, focus_handle, cx, )) .on_click(cx.listener(move |this, _, window, cx| { - if this.active_completion.is_some() { + if this.active_prediction.is_some() { this.thumbs_up_active( - &ThumbsUpActiveCompletion, + &ThumbsUpActivePrediction, window, cx, ); @@ -543,34 +717,32 @@ impl RateCompletionModal { fn render_shown_completions(&self, cx: &Context) -> impl Iterator { self.zeta .read(cx) - .shown_completions() + .shown_predictions() .cloned() .enumerate() .map(|(index, completion)| { let selected = self - .active_completion + .active_prediction .as_ref() - .is_some_and(|selected| selected.completion.id == completion.id); - let rated = self.zeta.read(cx).is_completion_rated(completion.id); + .is_some_and(|selected| selected.prediction.id == completion.id); + let rated = self.zeta.read(cx).is_prediction_rated(&completion.id); let (icon_name, 
icon_color, tooltip_text) = match (rated, completion.edits.is_empty()) { - (true, _) => (IconName::Check, Color::Success, "Rated Completion"), + (true, _) => (IconName::Check, Color::Success, "Rated Prediction"), (false, true) => (IconName::File, Color::Muted, "No Edits Produced"), (false, false) => (IconName::FileDiff, Color::Accent, "Edits Available"), }; - let file_name = completion - .path - .file_name() - .map(|f| f.to_string_lossy().into_owned()) - .unwrap_or("untitled".to_string()); - let file_path = completion - .path - .parent() - .map(|p| p.to_string_lossy().into_owned()); - - ListItem::new(completion.id) + let file = completion.buffer.read(cx).file(); + let file_name = file + .as_ref() + .map_or(SharedString::new_static("untitled"), |file| { + file.file_name(cx).to_string().into() + }); + let file_path = file.map(|file| file.path().as_unix_str().to_string()); + + ListItem::new(completion.id.clone()) .inset(true) .spacing(ListItemSpacing::Sparse) .focused(index == self.selected_index) @@ -615,12 +787,12 @@ impl RateCompletionModal { } } -impl Render for RateCompletionModal { +impl Render for RatePredictionsModal { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { let border_color = cx.theme().colors().border; h_flex() - .key_context("RateCompletionModal") + .key_context("RatePredictionModal") .track_focus(&self.focus_handle) .on_action(cx.listener(Self::dismiss)) .on_action(cx.listener(Self::confirm)) @@ -688,20 +860,20 @@ impl Render for RateCompletionModal { ), ), ) - .children(self.render_active_completion(cx)) + .children(self.render_active_completion(window, cx)) .on_mouse_down_out(cx.listener(|_, _, _, cx| cx.emit(DismissEvent))) } } -impl EventEmitter for RateCompletionModal {} +impl EventEmitter for RatePredictionsModal {} -impl Focusable for RateCompletionModal { +impl Focusable for RatePredictionsModal { fn focus_handle(&self, _cx: &App) -> FocusHandle { self.focus_handle.clone() } } -impl ModalView for 
RateCompletionModal {} +impl ModalView for RatePredictionsModal {} fn format_time_ago(elapsed: Duration) -> String { let seconds = elapsed.as_secs(); diff --git a/crates/zeta2/src/retrieval_search.rs b/crates/zeta/src/retrieval_search.rs similarity index 100% rename from crates/zeta2/src/retrieval_search.rs rename to crates/zeta/src/retrieval_search.rs diff --git a/crates/zeta2/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs similarity index 77% rename from crates/zeta2/src/sweep_ai.rs rename to crates/zeta/src/sweep_ai.rs index c56d7409fa212734c5f5a73a6b24319c27c7494f..0e226ab9df26ffc945a2d8e810790d0b00d0f198 100644 --- a/crates/zeta2/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -2,7 +2,6 @@ use std::fmt; use std::{path::Path, sync::Arc}; use serde::{Deserialize, Serialize}; -use util::rel_path::RelPath; #[derive(Debug, Clone, Serialize)] pub struct AutocompleteRequest { @@ -91,34 +90,24 @@ pub struct AdditionalCompletion { pub finish_reason: Option, } -pub(crate) fn write_event(event: crate::Event, f: &mut impl fmt::Write) -> fmt::Result { +pub(crate) fn write_event( + event: &cloud_llm_client::predict_edits_v3::Event, + f: &mut impl fmt::Write, +) -> fmt::Result { match event { - crate::Event::BufferChange { - old_snapshot, - new_snapshot, + cloud_llm_client::predict_edits_v3::Event::BufferChange { + old_path, + path, + diff, .. } => { - let old_path = old_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - let new_path = new_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - if old_path != new_path { + if old_path != path { // TODO confirm how to do this for sweep // writeln!(f, "User renamed {:?} to {:?}\n", old_path, new_path)?; } - let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); if !diff.is_empty() { - write!( - f, - "File: {}:\n{}\n", - new_path.display(util::paths::PathStyle::Posix), - diff - )? 
+ write!(f, "File: {}:\n{}\n", path.display(), diff)? } fmt::Result::Ok(()) diff --git a/crates/zeta2/src/udiff.rs b/crates/zeta/src/udiff.rs similarity index 100% rename from crates/zeta2/src/udiff.rs rename to crates/zeta/src/udiff.rs diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta/src/xml_edits.rs similarity index 100% rename from crates/zeta2/src/xml_edits.rs rename to crates/zeta/src/xml_edits.rs diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 5b2c3856eda2cd984e6675d671f8c99aa183e883..6464ce19ebaf1f95ad58e2954fb68e934600dac4 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -1,130 +1,178 @@ -mod completion_diff_element; -mod init; -mod input_excerpt; -mod license_detection; -mod onboarding_modal; -mod onboarding_telemetry; -mod rate_completion_modal; - -pub(crate) use completion_diff_element::*; -use db::kvp::{Dismissable, KEY_VALUE_STORE}; -use db::smol::stream::StreamExt as _; -use edit_prediction::DataCollectionState; -use futures::channel::mpsc; -pub use init::*; -use license_detection::LicenseDetectionWatcher; -pub use rate_completion_modal::*; - -use anyhow::{Context as _, Result, anyhow}; +use anyhow::{Context as _, Result, anyhow, bail}; use arrayvec::ArrayVec; use client::{Client, EditPredictionUsage, UserStore}; +use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature}; use cloud_llm_client::{ AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejection, MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, MINIMUM_REQUIRED_VERSION_HEADER_NAME, - PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, RejectEditPredictionsBody, - ZED_VERSION_HEADER_NAME, + RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, }; -use collections::{HashMap, HashSet, VecDeque}; -use futures::AsyncReadExt; +use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; +use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; +use collections::{HashMap, 
HashSet}; +use command_palette_hooks::CommandPaletteFilter; +use db::kvp::{Dismissable, KEY_VALUE_STORE}; +use edit_prediction_context::{ + DeclarationId, DeclarationStyle, EditPredictionContext, EditPredictionContextOptions, + EditPredictionExcerpt, EditPredictionExcerptOptions, EditPredictionScoreOptions, Line, + SyntaxIndex, SyntaxIndexState, +}; +use feature_flags::{FeatureFlag, FeatureFlagAppExt as _, PredictEditsRateCompletionsFeatureFlag}; +use futures::channel::{mpsc, oneshot}; +use futures::{AsyncReadExt as _, StreamExt as _}; use gpui::{ - App, AppContext as _, AsyncApp, Context, Entity, EntityId, Global, SharedString, Subscription, - Task, actions, + App, AsyncApp, Entity, EntityId, Global, SharedString, Subscription, Task, WeakEntity, actions, + http_client::{self, AsyncBody, Method}, + prelude::*, }; -use http_client::{AsyncBody, HttpClient, Method, Request, Response}; -use input_excerpt::excerpt_for_cursor_position; use language::{ - Anchor, Buffer, BufferSnapshot, EditPreview, File, OffsetRangeExt, ToOffset, ToPoint, text_diff, + Anchor, Buffer, DiagnosticSet, File, LanguageServerId, Point, ToOffset as _, ToPoint, }; +use language::{BufferSnapshot, OffsetRangeExt}; use language_model::{LlmApiToken, RefreshLlmTokenListener}; -use project::{Project, ProjectPath}; +use lsp::DiagnosticSeverity; +use open_ai::FunctionDefinition; +use project::{DisableAiSettings, Project, ProjectPath, WorktreeId}; use release_channel::AppVersion; use semver::Version; -use settings::WorktreeId; -use std::collections::hash_map; -use std::mem; -use std::str::FromStr; -use std::{ - cmp, - fmt::Write, - future::Future, - ops::Range, - path::Path, - rc::Rc, - sync::Arc, - time::{Duration, Instant}, -}; +use serde::de::DeserializeOwned; +use settings::{EditPredictionProvider, Settings as _, SettingsStore, update_settings_file}; +use std::any::{Any as _, TypeId}; +use std::collections::{VecDeque, hash_map}; use telemetry_events::EditPredictionRating; +use workspace::Workspace; + 
+use std::fmt::Write as _; +use std::ops::Range; +use std::path::Path; +use std::rc::Rc; +use std::str::FromStr as _; +use std::sync::{Arc, LazyLock}; +use std::time::{Duration, Instant}; +use std::{env, mem}; use thiserror::Error; -use util::ResultExt; -use util::rel_path::RelPath; -use uuid::Uuid; +use util::rel_path::RelPathBuf; +use util::{LogErrorFuture, RangeExt as _, ResultExt as _, TryFutureExt}; use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; -use worktree::Worktree; - -const CURSOR_MARKER: &str = "<|user_cursor_is_here|>"; -const START_OF_FILE_MARKER: &str = "<|start_of_file|>"; -const EDITABLE_REGION_START_MARKER: &str = "<|editable_region_start|>"; -const EDITABLE_REGION_END_MARKER: &str = "<|editable_region_end|>"; -const BUFFER_CHANGE_GROUPING_INTERVAL: Duration = Duration::from_secs(1); -const ZED_PREDICT_DATA_COLLECTION_CHOICE: &str = "zed_predict_data_collection_choice"; -const MAX_CONTEXT_TOKENS: usize = 150; -const MAX_REWRITE_TOKENS: usize = 350; -const MAX_EVENT_TOKENS: usize = 500; +pub mod assemble_excerpts; +mod license_detection; +mod onboarding_modal; +mod prediction; +mod provider; +mod rate_prediction_modal; +pub mod retrieval_search; +mod sweep_ai; +pub mod udiff; +mod xml_edits; +pub mod zeta1; -/// Maximum number of events to track. 
-const MAX_EVENT_COUNT: usize = 16; +#[cfg(test)] +mod zeta_tests; + +use crate::assemble_excerpts::assemble_excerpts; +use crate::license_detection::LicenseDetectionWatcher; +use crate::onboarding_modal::ZedPredictModal; +pub use crate::prediction::EditPrediction; +pub use crate::prediction::EditPredictionId; +pub use crate::prediction::EditPredictionInputs; +use crate::rate_prediction_modal::{ + NextEdit, PreviousEdit, RatePredictionsModal, ThumbsDownActivePrediction, + ThumbsUpActivePrediction, +}; +use crate::zeta1::request_prediction_with_zeta1; +pub use provider::ZetaEditPredictionProvider; actions!( edit_prediction, [ + /// Resets the edit prediction onboarding state. + ResetOnboarding, + /// Opens the rate completions modal. + RateCompletions, /// Clears the edit prediction history. - ClearHistory + ClearHistory, ] ); -#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] -pub struct EditPredictionId(Uuid); +/// Maximum number of events to track. +const EVENT_COUNT_MAX: usize = 6; +const CHANGE_GROUPING_LINE_SPAN: u32 = 8; +const ZED_PREDICT_DATA_COLLECTION_CHOICE: &str = "zed_predict_data_collection_choice"; -impl From for gpui::ElementId { - fn from(value: EditPredictionId) -> Self { - gpui::ElementId::Uuid(value.0) - } -} +pub struct SweepFeatureFlag; -impl std::fmt::Display for EditPredictionId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } +impl FeatureFlag for SweepFeatureFlag { + const NAME: &str = "sweep-ai"; } +pub const DEFAULT_EXCERPT_OPTIONS: EditPredictionExcerptOptions = EditPredictionExcerptOptions { + max_bytes: 512, + min_bytes: 128, + target_before_cursor_over_total_bytes: 0.5, +}; -struct ZedPredictUpsell; +pub const DEFAULT_CONTEXT_OPTIONS: ContextMode = + ContextMode::Agentic(DEFAULT_AGENTIC_CONTEXT_OPTIONS); -impl Dismissable for ZedPredictUpsell { - const KEY: &'static str = "dismissed-edit-predict-upsell"; +pub const DEFAULT_AGENTIC_CONTEXT_OPTIONS: AgenticContextOptions = 
AgenticContextOptions { + excerpt: DEFAULT_EXCERPT_OPTIONS, +}; - fn dismissed() -> bool { - // To make this backwards compatible with older versions of Zed, we - // check if the user has seen the previous Edit Prediction Onboarding - // before, by checking the data collection choice which was written to - // the database once the user clicked on "Accept and Enable" - if KEY_VALUE_STORE - .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) - .log_err() - .is_some_and(|s| s.is_some()) - { - return true; +pub const DEFAULT_SYNTAX_CONTEXT_OPTIONS: EditPredictionContextOptions = + EditPredictionContextOptions { + use_imports: true, + max_retrieved_declarations: 0, + excerpt: DEFAULT_EXCERPT_OPTIONS, + score: EditPredictionScoreOptions { + omit_excerpt_overlaps: true, + }, + }; + +pub const DEFAULT_OPTIONS: ZetaOptions = ZetaOptions { + context: DEFAULT_CONTEXT_OPTIONS, + max_prompt_bytes: DEFAULT_MAX_PROMPT_BYTES, + max_diagnostic_bytes: 2048, + prompt_format: PromptFormat::DEFAULT, + file_indexing_parallelism: 1, + buffer_change_grouping_interval: Duration::from_secs(1), +}; + +static USE_OLLAMA: LazyLock = + LazyLock::new(|| env::var("ZED_ZETA2_OLLAMA").is_ok_and(|var| !var.is_empty())); +static CONTEXT_RETRIEVAL_MODEL_ID: LazyLock = LazyLock::new(|| { + env::var("ZED_ZETA2_CONTEXT_MODEL").unwrap_or(if *USE_OLLAMA { + "qwen3-coder:30b".to_string() + } else { + "yqvev8r3".to_string() + }) +}); +static EDIT_PREDICTIONS_MODEL_ID: LazyLock = LazyLock::new(|| { + match env::var("ZED_ZETA2_MODEL").as_deref() { + Ok("zeta2-exp") => "4w5n28vw", // Fine-tuned model @ Baseten + Ok(model) => model, + Err(_) if *USE_OLLAMA => "qwen3-coder:30b", + Err(_) => "yqvev8r3", // Vanilla qwen3-coder @ Baseten + } + .to_string() +}); +static PREDICT_EDITS_URL: LazyLock> = LazyLock::new(|| { + env::var("ZED_PREDICT_EDITS_URL").ok().or_else(|| { + if *USE_OLLAMA { + Some("http://localhost:11434/v1/chat/completions".into()) + } else { + None } + }) +}); - KEY_VALUE_STORE - .read_kvp(Self::KEY) - 
.log_err() - .is_some_and(|s| s.is_some()) - } -} +pub struct Zeta2FeatureFlag; -pub fn should_show_upsell_modal() -> bool { - !ZedPredictUpsell::dismissed() +impl FeatureFlag for Zeta2FeatureFlag { + const NAME: &'static str = "zeta2"; + + fn enabled_for_staff() -> bool { + false + } } #[derive(Clone)] @@ -132,108 +180,291 @@ struct ZetaGlobal(Entity); impl Global for ZetaGlobal {} -#[derive(Clone)] -pub struct EditPrediction { - id: EditPredictionId, - path: Arc, - excerpt_range: Range, - cursor_offset: usize, - edits: Arc<[(Range, Arc)]>, - snapshot: BufferSnapshot, - edit_preview: EditPreview, - input_outline: Arc, - input_events: Arc, - input_excerpt: Arc, - output_excerpt: Arc, - buffer_snapshotted_at: Instant, - response_received_at: Instant, +pub struct Zeta { + client: Arc, + user_store: Entity, + llm_token: LlmApiToken, + _llm_token_subscription: Subscription, + projects: HashMap, + options: ZetaOptions, + update_required: bool, + debug_tx: Option>, + #[cfg(feature = "eval-support")] + eval_cache: Option>, + edit_prediction_model: ZetaEditPredictionModel, + sweep_api_token: Option, + sweep_ai_debug_info: Arc, + data_collection_choice: DataCollectionChoice, + rejected_predictions: Vec, + reject_predictions_tx: mpsc::UnboundedSender<()>, + reject_predictions_debounce_task: Option>, + shown_predictions: VecDeque, + rated_predictions: HashSet, } -impl EditPrediction { - fn latency(&self) -> Duration { - self.response_received_at - .duration_since(self.buffer_snapshotted_at) - } +#[derive(Default, PartialEq, Eq)] +pub enum ZetaEditPredictionModel { + #[default] + Zeta1, + Zeta2, + Sweep, +} - fn interpolate(&self, new_snapshot: &BufferSnapshot) -> Option, Arc)>> { - edit_prediction::interpolate_edits(&self.snapshot, new_snapshot, &self.edits) - } +#[derive(Debug, Clone, PartialEq)] +pub struct ZetaOptions { + pub context: ContextMode, + pub max_prompt_bytes: usize, + pub max_diagnostic_bytes: usize, + pub prompt_format: predict_edits_v3::PromptFormat, + pub 
file_indexing_parallelism: usize, + pub buffer_change_grouping_interval: Duration, } -impl std::fmt::Debug for EditPrediction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("EditPrediction") - .field("id", &self.id) - .field("path", &self.path) - .field("edits", &self.edits) - .finish_non_exhaustive() +#[derive(Debug, Clone, PartialEq)] +pub enum ContextMode { + Agentic(AgenticContextOptions), + Syntax(EditPredictionContextOptions), +} + +#[derive(Debug, Clone, PartialEq)] +pub struct AgenticContextOptions { + pub excerpt: EditPredictionExcerptOptions, +} + +impl ContextMode { + pub fn excerpt(&self) -> &EditPredictionExcerptOptions { + match self { + ContextMode::Agentic(options) => &options.excerpt, + ContextMode::Syntax(options) => &options.excerpt, + } } } -pub struct Zeta { - projects: HashMap, - client: Arc, - shown_completions: VecDeque, - rated_completions: HashSet, - data_collection_choice: DataCollectionChoice, - discarded_completions: Vec, - llm_token: LlmApiToken, - _llm_token_subscription: Subscription, - /// Whether an update to a newer version of Zed is required to continue using Zeta. 
- update_required: bool, - user_store: Entity, - license_detection_watchers: HashMap>, - discard_completions_debounce_task: Option>, - discard_completions_tx: mpsc::UnboundedSender<()>, +#[derive(Debug)] +pub enum ZetaDebugInfo { + ContextRetrievalStarted(ZetaContextRetrievalStartedDebugInfo), + SearchQueriesGenerated(ZetaSearchQueryDebugInfo), + SearchQueriesExecuted(ZetaContextRetrievalDebugInfo), + ContextRetrievalFinished(ZetaContextRetrievalDebugInfo), + EditPredictionRequested(ZetaEditPredictionDebugInfo), +} + +#[derive(Debug)] +pub struct ZetaContextRetrievalStartedDebugInfo { + pub project: Entity, + pub timestamp: Instant, + pub search_prompt: String, +} + +#[derive(Debug)] +pub struct ZetaContextRetrievalDebugInfo { + pub project: Entity, + pub timestamp: Instant, +} + +#[derive(Debug)] +pub struct ZetaEditPredictionDebugInfo { + pub inputs: EditPredictionInputs, + pub retrieval_time: Duration, + pub buffer: WeakEntity, + pub position: language::Anchor, + pub local_prompt: Result, + pub response_rx: oneshot::Receiver<(Result, Duration)>, +} + +#[derive(Debug)] +pub struct ZetaSearchQueryDebugInfo { + pub project: Entity, + pub timestamp: Instant, + pub search_queries: Vec, } +pub type RequestDebugInfo = predict_edits_v3::DebugInfo; + struct ZetaProject { - events: VecDeque, + syntax_index: Option>, + events: VecDeque>, + last_event: Option, + recent_paths: VecDeque, registered_buffers: HashMap, + current_prediction: Option, + next_pending_prediction_id: usize, + pending_predictions: ArrayVec, + last_prediction_refresh: Option<(EntityId, Instant)>, + context: Option, Vec>>>, + refresh_context_task: Option>>>, + refresh_context_debounce_task: Option>>, + refresh_context_timestamp: Option, + license_detection_watchers: HashMap>, + _subscription: gpui::Subscription, } -impl Zeta { - pub fn global(cx: &mut App) -> Option> { - cx.try_global::().map(|global| global.0.clone()) +impl ZetaProject { + pub fn events(&self, cx: &App) -> Vec> { + self.events + .iter() 
+ .cloned() + .chain( + self.last_event + .as_ref() + .and_then(|event| event.finalize(&self.license_detection_watchers, cx)), + ) + .collect() } +} - pub fn register( - worktree: Option>, - client: Arc, - user_store: Entity, - cx: &mut App, - ) -> Entity { - let this = Self::global(cx).unwrap_or_else(|| { - let entity = cx.new(|cx| Self::new(client, user_store, cx)); - cx.set_global(ZetaGlobal(entity.clone())); - entity - }); +#[derive(Debug, Clone)] +struct CurrentEditPrediction { + pub requested_by: PredictionRequestedBy, + pub prediction: EditPrediction, + pub was_shown: bool, +} - this.update(cx, move |this, cx| { - if let Some(worktree) = worktree { - let worktree_id = worktree.read(cx).id(); - this.license_detection_watchers - .entry(worktree_id) - .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx))); - } - }); +impl CurrentEditPrediction { + fn should_replace_prediction(&self, old_prediction: &Self, cx: &App) -> bool { + let Some(new_edits) = self + .prediction + .interpolate(&self.prediction.buffer.read(cx)) + else { + return false; + }; + + if self.prediction.buffer != old_prediction.prediction.buffer { + return true; + } + + let Some(old_edits) = old_prediction + .prediction + .interpolate(&old_prediction.prediction.buffer.read(cx)) + else { + return true; + }; - this + let requested_by_buffer_id = self.requested_by.buffer_id(); + + // This reduces the occurrence of UI thrash from replacing edits + // + // TODO: This is fairly arbitrary - should have a more general heuristic that handles multiple edits. 
+ if requested_by_buffer_id == Some(self.prediction.buffer.entity_id()) + && requested_by_buffer_id == Some(old_prediction.prediction.buffer.entity_id()) + && old_edits.len() == 1 + && new_edits.len() == 1 + { + let (old_range, old_text) = &old_edits[0]; + let (new_range, new_text) = &new_edits[0]; + new_range == old_range && new_text.starts_with(old_text.as_ref()) + } else { + true + } } +} - pub fn clear_history(&mut self) { - for zeta_project in self.projects.values_mut() { - zeta_project.events.clear(); +#[derive(Debug, Clone)] +enum PredictionRequestedBy { + DiagnosticsUpdate, + Buffer(EntityId), +} + +impl PredictionRequestedBy { + pub fn buffer_id(&self) -> Option { + match self { + PredictionRequestedBy::DiagnosticsUpdate => None, + PredictionRequestedBy::Buffer(buffer_id) => Some(*buffer_id), } } +} - pub fn usage(&self, cx: &App) -> Option { - self.user_store.read(cx).edit_prediction_usage() +struct PendingPrediction { + id: usize, + task: Task>, +} + +/// A prediction from the perspective of a buffer. 
+#[derive(Debug)] +enum BufferEditPrediction<'a> { + Local { prediction: &'a EditPrediction }, + Jump { prediction: &'a EditPrediction }, +} + +struct RegisteredBuffer { + snapshot: BufferSnapshot, + _subscriptions: [gpui::Subscription; 2], +} + +struct LastEvent { + old_snapshot: BufferSnapshot, + new_snapshot: BufferSnapshot, + end_edit_anchor: Option, +} + +impl LastEvent { + pub fn finalize( + &self, + license_detection_watchers: &HashMap>, + cx: &App, + ) -> Option> { + let path = buffer_path_with_id_fallback(&self.new_snapshot, cx); + let old_path = buffer_path_with_id_fallback(&self.old_snapshot, cx); + + let file = self.new_snapshot.file(); + let old_file = self.old_snapshot.file(); + + let in_open_source_repo = [file, old_file].iter().all(|file| { + file.is_some_and(|file| { + license_detection_watchers + .get(&file.worktree_id(cx)) + .is_some_and(|watcher| watcher.is_project_open_source()) + }) + }); + + let diff = language::unified_diff(&self.old_snapshot.text(), &self.new_snapshot.text()); + + if path == old_path && diff.is_empty() { + None + } else { + Some(Arc::new(predict_edits_v3::Event::BufferChange { + old_path, + path, + diff, + in_open_source_repo, + // TODO: Actually detect if this edit was predicted or not + predicted: false, + })) + } + } +} + +fn buffer_path_with_id_fallback(snapshot: &BufferSnapshot, cx: &App) -> Arc { + if let Some(file) = snapshot.file() { + file.full_path(cx).into() + } else { + Path::new(&format!("untitled-{}", snapshot.remote_id())).into() + } +} + +impl Zeta { + pub fn try_global(cx: &App) -> Option> { + cx.try_global::().map(|global| global.0.clone()) + } + + pub fn global( + client: &Arc, + user_store: &Entity, + cx: &mut App, + ) -> Entity { + cx.try_global::() + .map(|global| global.0.clone()) + .unwrap_or_else(|| { + let zeta = cx.new(|cx| Self::new(client.clone(), user_store.clone(), cx)); + cx.set_global(ZetaGlobal(zeta.clone())); + zeta + }) } - fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self 
{ + pub fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self { let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx); let data_collection_choice = Self::load_data_collection_choice(); + let (reject_tx, mut reject_rx) = mpsc::unbounded(); cx.spawn(async move |this, cx| { while let Some(()) = reject_rx.next().await { @@ -248,12 +479,8 @@ impl Zeta { Self { projects: HashMap::default(), client, - shown_completions: VecDeque::new(), - rated_completions: HashSet::default(), - discarded_completions: Vec::new(), - discard_completions_debounce_task: None, - discard_completions_tx: reject_tx, - data_collection_choice, + user_store, + options: DEFAULT_OPTIONS, llm_token: LlmApiToken::default(), _llm_token_subscription: cx.subscribe( &refresh_llm_token_listener, @@ -268,64 +495,85 @@ impl Zeta { }, ), update_required: false, - license_detection_watchers: HashMap::default(), - user_store, + debug_tx: None, + #[cfg(feature = "eval-support")] + eval_cache: None, + edit_prediction_model: ZetaEditPredictionModel::Zeta2, + sweep_api_token: std::env::var("SWEEP_AI_TOKEN") + .context("No SWEEP_AI_TOKEN environment variable set") + .log_err(), + data_collection_choice, + sweep_ai_debug_info: sweep_ai::debug_info(cx), + rejected_predictions: Vec::new(), + reject_predictions_debounce_task: None, + reject_predictions_tx: reject_tx, + rated_predictions: Default::default(), + shown_predictions: Default::default(), } } - fn get_or_init_zeta_project( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> &mut ZetaProject { - let project_id = project.entity_id(); - match self.projects.entry(project_id) { - hash_map::Entry::Occupied(entry) => entry.into_mut(), - hash_map::Entry::Vacant(entry) => { - cx.observe_release(project, move |this, _, _cx| { - this.projects.remove(&project_id); - }) - .detach(); - entry.insert(ZetaProject { - events: VecDeque::with_capacity(MAX_EVENT_COUNT), - registered_buffers: HashMap::default(), - }) - } - } + pub fn 
set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) { + self.edit_prediction_model = model; } - fn push_event(zeta_project: &mut ZetaProject, event: Event) { - let events = &mut zeta_project.events; + pub fn has_sweep_api_token(&self) -> bool { + self.sweep_api_token.is_some() + } - if let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - timestamp: last_timestamp, - .. - }) = events.back_mut() - { - // Coalesce edits for the same buffer when they happen one after the other. - let Event::BufferChange { - old_snapshot, - new_snapshot, - timestamp, - } = &event; - - if timestamp.duration_since(*last_timestamp) <= BUFFER_CHANGE_GROUPING_INTERVAL - && old_snapshot.remote_id() == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version - { - *last_new_snapshot = new_snapshot.clone(); - *last_timestamp = *timestamp; - return; - } + #[cfg(feature = "eval-support")] + pub fn with_eval_cache(&mut self, cache: Arc) { + self.eval_cache = Some(cache); + } + + pub fn debug_info(&mut self) -> mpsc::UnboundedReceiver { + let (debug_watch_tx, debug_watch_rx) = mpsc::unbounded(); + self.debug_tx = Some(debug_watch_tx); + debug_watch_rx + } + + pub fn options(&self) -> &ZetaOptions { + &self.options + } + + pub fn set_options(&mut self, options: ZetaOptions) { + self.options = options; + } + + pub fn clear_history(&mut self) { + for zeta_project in self.projects.values_mut() { + zeta_project.events.clear(); } + } + + pub fn context_for_project( + &self, + project: &Entity, + ) -> impl Iterator, &[Range])> { + self.projects + .get(&project.entity_id()) + .and_then(|project| { + Some( + project + .context + .as_ref()? + .iter() + .map(|(buffer, ranges)| (buffer.clone(), ranges.as_slice())), + ) + }) + .into_iter() + .flatten() + } - if events.len() >= MAX_EVENT_COUNT { - // These are halved instead of popping to improve prompt caching. 
- events.drain(..MAX_EVENT_COUNT / 2); + pub fn usage(&self, cx: &App) -> Option { + if self.edit_prediction_model == ZetaEditPredictionModel::Zeta2 { + self.user_store.read(cx).edit_prediction_usage() + } else { + None } + } - events.push_back(event); + pub fn register_project(&mut self, project: &Entity, cx: &mut Context) { + self.get_or_init_zeta_project(project, cx); } pub fn register_buffer( @@ -338,6 +586,69 @@ impl Zeta { Self::register_buffer_impl(zeta_project, buffer, project, cx); } + fn get_or_init_zeta_project( + &mut self, + project: &Entity, + cx: &mut Context, + ) -> &mut ZetaProject { + self.projects + .entry(project.entity_id()) + .or_insert_with(|| ZetaProject { + syntax_index: if let ContextMode::Syntax(_) = &self.options.context { + Some(cx.new(|cx| { + SyntaxIndex::new(project, self.options.file_indexing_parallelism, cx) + })) + } else { + None + }, + events: VecDeque::new(), + last_event: None, + recent_paths: VecDeque::new(), + registered_buffers: HashMap::default(), + current_prediction: None, + pending_predictions: ArrayVec::new(), + next_pending_prediction_id: 0, + last_prediction_refresh: None, + context: None, + refresh_context_task: None, + refresh_context_debounce_task: None, + refresh_context_timestamp: None, + license_detection_watchers: HashMap::default(), + _subscription: cx.subscribe(&project, Self::handle_project_event), + }) + } + + fn handle_project_event( + &mut self, + project: Entity, + event: &project::Event, + cx: &mut Context, + ) { + // TODO [zeta2] init with recent paths + match event { + project::Event::ActiveEntryChanged(Some(active_entry_id)) => { + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; + let path = project.read(cx).path_for_entry(*active_entry_id, cx); + if let Some(path) = path { + if let Some(ix) = zeta_project + .recent_paths + .iter() + .position(|probe| probe == &path) + { + zeta_project.recent_paths.remove(ix); + } + 
zeta_project.recent_paths.push_front(path); + } + } + project::Event::DiagnosticsUpdated { .. } => { + self.refresh_prediction_from_diagnostics(project, cx); + } + _ => (), + } + } + fn register_buffer_impl<'a>( zeta_project: &'a mut ZetaProject, buffer: &Entity, @@ -345,6 +656,28 @@ impl Zeta { cx: &mut Context, ) -> &'a mut RegisteredBuffer { let buffer_id = buffer.entity_id(); + + if let Some(file) = buffer.read(cx).file() { + let worktree_id = file.worktree_id(cx); + if let Some(worktree) = project.read(cx).worktree_for_id(worktree_id, cx) { + zeta_project + .license_detection_watchers + .entry(worktree_id) + .or_insert_with(|| { + let project_entity_id = project.entity_id(); + cx.observe_release(&worktree, move |this, _worktree, _cx| { + let Some(zeta_project) = this.projects.get_mut(&project_entity_id) + else { + return; + }; + zeta_project.license_detection_watchers.remove(&worktree_id); + }) + .detach(); + Rc::new(LicenseDetectionWatcher::new(&worktree, cx)) + }); + } + } + match zeta_project.registered_buffers.entry(buffer_id) { hash_map::Entry::Occupied(entry) => entry.into_mut(), hash_map::Entry::Vacant(entry) => { @@ -376,2037 +709,2755 @@ impl Zeta { } } - fn request_completion_impl( + fn report_changes_for_buffer( &mut self, - project: &Entity, buffer: &Entity, - cursor: language::Anchor, + project: &Entity, cx: &mut Context, - perform_predict_edits: F, - ) -> Task>> - where - F: FnOnce(PerformPredictEditsParams) -> R + 'static, - R: Future)>> - + Send - + 'static, - { - let buffer = buffer.clone(); - let buffer_snapshotted_at = Instant::now(); - let snapshot = self.report_changes_for_buffer(&buffer, project, cx); - let zeta = cx.entity(); - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let app_version = AppVersion::global(cx); - - let zeta_project = self.get_or_init_zeta_project(project, cx); - let mut events = Vec::with_capacity(zeta_project.events.len()); - events.extend(zeta_project.events.iter().cloned()); - let 
events = Arc::new(events); - - let (git_info, can_collect_file) = if let Some(file) = snapshot.file() { - let can_collect_file = self.can_collect_file(file, cx); - let git_info = if can_collect_file { - git_info_for_file(project, &ProjectPath::from_file(file.as_ref(), cx), cx) - } else { - None - }; - (git_info, can_collect_file) - } else { - (None, false) - }; - - let full_path: Arc = snapshot - .file() - .map(|f| Arc::from(f.full_path(cx).as_path())) - .unwrap_or_else(|| Arc::from(Path::new("untitled"))); - let full_path_str = full_path.to_string_lossy().into_owned(); - let cursor_point = cursor.to_point(&snapshot); - let cursor_offset = cursor_point.to_offset(&snapshot); - let prompt_for_events = { - let events = events.clone(); - move || prompt_for_events_impl(&events, MAX_EVENT_TOKENS) - }; - let gather_task = gather_context( - full_path_str, - &snapshot, - cursor_point, - prompt_for_events, - cx, - ); - - cx.spawn(async move |this, cx| { - let GatherContextOutput { - mut body, - editable_range, - included_events_count, - } = gather_task.await?; - let done_gathering_context_at = Instant::now(); - - let included_events = &events[events.len() - included_events_count..events.len()]; - body.can_collect_data = can_collect_file - && this - .read_with(cx, |this, cx| this.can_collect_events(included_events, cx)) - .unwrap_or(false); - if body.can_collect_data { - body.git_info = git_info; - } - - log::debug!( - "Events:\n{}\nExcerpt:\n{:?}", - body.input_events, - body.input_excerpt - ); - - let input_outline = body.outline.clone().unwrap_or_default(); - let input_events = body.input_events.clone(); - let input_excerpt = body.input_excerpt.clone(); - - let response = perform_predict_edits(PerformPredictEditsParams { - client, - llm_token, - app_version, - body, - }) - .await; - let (response, usage) = match response { - Ok(response) => response, - Err(err) => { - if err.is::() { - cx.update(|cx| { - zeta.update(cx, |zeta, _cx| { - zeta.update_required = true; - }); - 
- let error_message: SharedString = err.to_string().into(); - show_app_notification( - NotificationId::unique::(), - cx, - move |cx| { - cx.new(|cx| { - ErrorMessagePrompt::new(error_message.clone(), cx) - .with_link_button( - "Update Zed", - "https://zed.dev/releases", - ) - }) - }, - ); - }) - .ok(); - } + ) { + let project_state = self.get_or_init_zeta_project(project, cx); + let registered_buffer = Self::register_buffer_impl(project_state, buffer, project, cx); - return Err(err); - } - }; + let new_snapshot = buffer.read(cx).snapshot(); + if new_snapshot.version == registered_buffer.snapshot.version { + return; + } - let received_response_at = Instant::now(); - log::debug!("completion response: {}", &response.output_excerpt); + let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); + let end_edit_anchor = new_snapshot + .anchored_edits_since::(&old_snapshot.version) + .last() + .map(|(_, range)| range.end); + let events = &mut project_state.events; - if let Some(usage) = usage { - this.update(cx, |this, cx| { - this.user_store.update(cx, |user_store, cx| { - user_store.update_edit_prediction_usage(usage, cx); + if let Some(LastEvent { + new_snapshot: last_new_snapshot, + end_edit_anchor: last_end_edit_anchor, + .. 
+ }) = project_state.last_event.as_mut() + { + let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() + == last_new_snapshot.remote_id() + && old_snapshot.version == last_new_snapshot.version; + + let should_coalesce = is_next_snapshot_of_same_buffer + && end_edit_anchor + .as_ref() + .zip(last_end_edit_anchor.as_ref()) + .is_some_and(|(a, b)| { + let a = a.to_point(&new_snapshot); + let b = b.to_point(&new_snapshot); + a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN }); - }) - .ok(); + + if should_coalesce { + *last_end_edit_anchor = end_edit_anchor; + *last_new_snapshot = new_snapshot; + return; } + } - let edit_prediction = Self::process_completion_response( - response, - buffer, - &snapshot, - editable_range, - cursor_offset, - full_path, - input_outline, - input_events, - input_excerpt, - buffer_snapshotted_at, - cx, - ) - .await; + if events.len() + 1 >= EVENT_COUNT_MAX { + events.pop_front(); + } - let finished_at = Instant::now(); - - // record latency for ~1% of requests - if rand::random::() <= 2 { - telemetry::event!( - "Edit Prediction Request", - context_latency = done_gathering_context_at - .duration_since(buffer_snapshotted_at) - .as_millis(), - request_latency = received_response_at - .duration_since(done_gathering_context_at) - .as_millis(), - process_latency = finished_at.duration_since(received_response_at).as_millis() - ); - } + if let Some(event) = project_state.last_event.take() { + events.extend(event.finalize(&project_state.license_detection_watchers, cx)); + } - edit_prediction - }) + project_state.last_event = Some(LastEvent { + old_snapshot, + new_snapshot, + end_edit_anchor, + }); } - #[cfg(any(test, feature = "test-support"))] - pub fn fake_completion( - &mut self, - project: &Entity, + fn current_prediction_for_buffer( + &self, buffer: &Entity, - position: language::Anchor, - response: PredictEditsResponse, - cx: &mut Context, - ) -> Task>> { - self.request_completion_impl(project, buffer, position, cx, |_params| { - 
std::future::ready(Ok((response, None))) - }) - } - - pub fn request_completion( - &mut self, project: &Entity, - buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - self.request_completion_impl(project, buffer, position, cx, Self::perform_predict_edits) - } - - pub fn perform_predict_edits( - params: PerformPredictEditsParams, - ) -> impl Future)>> { - async move { - let PerformPredictEditsParams { - client, - llm_token, - app_version, - body, - .. - } = params; - - let http_client = client.http_client(); - let mut token = llm_token.acquire(&client).await?; - let mut did_retry = false; - - loop { - let request_builder = http_client::Request::builder().method(Method::POST); - let request_builder = - if let Ok(predict_edits_url) = std::env::var("ZED_PREDICT_EDITS_URL") { - request_builder.uri(predict_edits_url) - } else { - request_builder.uri( - http_client - .build_zed_llm_url("/predict_edits/v2", &[])? - .as_ref(), - ) - }; - let request = request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) - .body(serde_json::to_string(&body)?.into())?; + cx: &App, + ) -> Option> { + let project_state = self.projects.get(&project.entity_id())?; - let mut response = http_client.send(request).await?; + let CurrentEditPrediction { + requested_by, + prediction, + .. 
+ } = project_state.current_prediction.as_ref()?; - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - { - anyhow::ensure!( - app_version >= minimum_required_version, - ZedUpdateRequiredError { - minimum_version: minimum_required_version - } - ); + if prediction.targets_buffer(buffer.read(cx)) { + Some(BufferEditPrediction::Local { prediction }) + } else { + let show_jump = match requested_by { + PredictionRequestedBy::Buffer(requested_by_buffer_id) => { + requested_by_buffer_id == &buffer.entity_id() } + PredictionRequestedBy::DiagnosticsUpdate => true, + }; - if response.status().is_success() { - let usage = EditPredictionUsage::from_headers(response.headers()).ok(); - - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - return Ok((serde_json::from_str(&body)?, usage)); - } else if !did_retry - && response - .headers() - .get(EXPIRED_LLM_TOKEN_HEADER_NAME) - .is_some() - { - did_retry = true; - token = llm_token.refresh(&client).await?; - } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - anyhow::bail!( - "error predicting edits.\nStatus: {:?}\nBody: {}", - response.status(), - body - ); - } + if show_jump { + Some(BufferEditPrediction::Jump { prediction }) + } else { + None } } } - fn accept_edit_prediction( - &mut self, - request_id: EditPredictionId, - cx: &mut Context, - ) -> Task> { + fn accept_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {} + ZetaEditPredictionModel::Sweep => return, + } + + let Some(project_state) = self.projects.get_mut(&project.entity_id()) else { + return; + }; + + let Some(prediction) = project_state.current_prediction.take() else { + return; + }; + let request_id = 
prediction.prediction.id.to_string(); + for pending_prediction in mem::take(&mut project_state.pending_predictions) { + self.cancel_pending_prediction(pending_prediction, cx); + } + let client = self.client.clone(); let llm_token = self.llm_token.clone(); let app_version = AppVersion::global(cx); cx.spawn(async move |this, cx| { - let http_client = client.http_client(); - let mut response = llm_token_retry(&llm_token, &client, |token| { - let request_builder = http_client::Request::builder().method(Method::POST); - let request_builder = - if let Ok(accept_prediction_url) = std::env::var("ZED_ACCEPT_PREDICTION_URL") { - request_builder.uri(accept_prediction_url) - } else { - request_builder.uri( - http_client - .build_zed_llm_url("/predict_edits/accept", &[])? - .as_ref(), - ) - }; - Ok(request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) - .body( - serde_json::to_string(&AcceptEditPredictionBody { - request_id: request_id.0.to_string(), - })? - .into(), - )?) - }) - .await?; - - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - && app_version < minimum_required_version - { - return Err(anyhow!(ZedUpdateRequiredError { - minimum_version: minimum_required_version - })); - } - - if response.status().is_success() { - if let Some(usage) = EditPredictionUsage::from_headers(response.headers()).ok() { - this.update(cx, |this, cx| { - this.user_store.update(cx, |user_store, cx| { - user_store.update_edit_prediction_usage(usage, cx); - }); - })?; - } - - Ok(()) + let url = if let Ok(predict_edits_url) = env::var("ZED_ACCEPT_PREDICTION_URL") { + http_client::Url::parse(&predict_edits_url)? 
} else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - Err(anyhow!( - "error accepting edit prediction.\nStatus: {:?}\nBody: {}", - response.status(), - body + client + .http_client() + .build_zed_llm_url("/predict_edits/accept", &[])? + }; + + let response = cx + .background_spawn(Self::send_api_request::<()>( + move |builder| { + let req = builder.uri(url.as_ref()).body( + serde_json::to_string(&AcceptEditPredictionBody { + request_id: request_id.clone(), + })? + .into(), + ); + Ok(req?) + }, + client, + llm_token, + app_version, )) - } + .await; + + Self::handle_api_response(&this, response, cx)?; + anyhow::Ok(()) }) + .detach_and_log_err(cx); } fn reject_edit_predictions(&mut self, cx: &mut Context) -> Task> { + match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {} + ZetaEditPredictionModel::Sweep => return Task::ready(anyhow::Ok(())), + } + let client = self.client.clone(); let llm_token = self.llm_token.clone(); let app_version = AppVersion::global(cx); - let last_rejection = self.discarded_completions.last().cloned(); - let body = serde_json::to_string(&RejectEditPredictionsBody { - rejections: self.discarded_completions.clone(), - }) - .ok(); - + let last_rejection = self.rejected_predictions.last().cloned(); let Some(last_rejection) = last_rejection else { return Task::ready(anyhow::Ok(())); }; + let body = serde_json::to_string(&RejectEditPredictionsBody { + rejections: self.rejected_predictions.clone(), + }) + .ok(); + cx.spawn(async move |this, cx| { - let http_client = client.http_client(); - let mut response = llm_token_retry(&llm_token, &client, |token| { - let request_builder = http_client::Request::builder().method(Method::POST); - let request_builder = request_builder.uri( - http_client - .build_zed_llm_url("/predict_edits/reject", &[])? 
- .as_ref(), - ); - Ok(request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) - .body( - body.as_ref() - .context("failed to serialize body")? - .clone() - .into(), - )?) + let url = client + .http_client() + .build_zed_llm_url("/predict_edits/reject", &[])?; + + cx.background_spawn(Self::send_api_request::<()>( + move |builder| { + let req = builder.uri(url.as_ref()).body(body.clone().into()); + Ok(req?) + }, + client, + llm_token, + app_version, + )) + .await + .context("Failed to reject edit predictions")?; + + this.update(cx, |this, _| { + if let Some(ix) = this + .rejected_predictions + .iter() + .position(|rejection| rejection.request_id == last_rejection.request_id) + { + this.rejected_predictions.drain(..ix + 1); + } }) - .await?; + }) + } - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - && app_version < minimum_required_version - { - return Err(anyhow!(ZedUpdateRequiredError { - minimum_version: minimum_required_version - })); + fn discard_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { + project_state.pending_predictions.clear(); + if let Some(prediction) = project_state.current_prediction.take() { + self.discard_prediction(prediction.prediction.id, prediction.was_shown, cx); } + }; + } - if response.status().is_success() { - this.update(cx, |this, _| { - if let Some(ix) = this - .discarded_completions - .iter() - .position(|rejection| rejection.request_id == last_rejection.request_id) - { - this.discarded_completions.drain(..ix + 1); + fn did_show_current_prediction(&mut self, project: &Entity, _cx: &mut Context) { + if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { + if let 
Some(current_prediction) = project_state.current_prediction.as_mut() { + if !current_prediction.was_shown { + current_prediction.was_shown = true; + self.shown_predictions + .push_front(current_prediction.prediction.clone()); + if self.shown_predictions.len() > 50 { + let completion = self.shown_predictions.pop_back().unwrap(); + self.rated_predictions.remove(&completion.id); } - }) - } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - Err(anyhow!( - "error rejecting edit predictions.\nStatus: {:?}\nBody: {}", - response.status(), - body - )) + } + } + } + } + + fn discard_prediction( + &mut self, + prediction_id: EditPredictionId, + was_shown: bool, + cx: &mut Context, + ) { + self.rejected_predictions.push(EditPredictionRejection { + request_id: prediction_id.to_string(), + was_shown, + }); + + let reached_request_limit = + self.rejected_predictions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; + let reject_tx = self.reject_predictions_tx.clone(); + self.reject_predictions_debounce_task = Some(cx.spawn(async move |_this, cx| { + const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); + if !reached_request_limit { + cx.background_executor() + .timer(DISCARD_COMPLETIONS_DEBOUNCE) + .await; } + reject_tx.unbounded_send(()).log_err(); + })); + } + + fn cancel_pending_prediction( + &self, + pending_prediction: PendingPrediction, + cx: &mut Context, + ) { + cx.spawn(async move |this, cx| { + let Some(prediction_id) = pending_prediction.task.await else { + return; + }; + + this.update(cx, |this, cx| { + this.discard_prediction(prediction_id, false, cx); + }) + .ok(); }) + .detach() + } + + fn is_refreshing(&self, project: &Entity) -> bool { + self.projects + .get(&project.entity_id()) + .is_some_and(|project_state| !project_state.pending_predictions.is_empty()) } - fn process_completion_response( - prediction_response: PredictEditsResponse, + pub fn refresh_prediction_from_buffer( + &mut self, + 
project: Entity, buffer: Entity, - snapshot: &BufferSnapshot, - editable_range: Range, - cursor_offset: usize, - path: Arc, - input_outline: String, - input_events: String, - input_excerpt: String, - buffer_snapshotted_at: Instant, - cx: &AsyncApp, - ) -> Task>> { - let snapshot = snapshot.clone(); - let request_id = prediction_response.request_id; - let output_excerpt = prediction_response.output_excerpt; - cx.spawn(async move |cx| { - let output_excerpt: Arc = output_excerpt.into(); - - let edits: Arc<[(Range, Arc)]> = cx - .background_spawn({ - let output_excerpt = output_excerpt.clone(); - let editable_range = editable_range.clone(); - let snapshot = snapshot.clone(); - async move { Self::parse_edits(output_excerpt, editable_range, &snapshot) } + position: language::Anchor, + cx: &mut Context, + ) { + self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { + let Some(request_task) = this + .update(cx, |this, cx| { + this.request_prediction(&project, &buffer, position, cx) }) - .await? - .into(); - - let Some((edits, snapshot, edit_preview)) = buffer.read_with(cx, { - let edits = edits.clone(); - move |buffer, cx| { - let new_snapshot = buffer.snapshot(); - let edits: Arc<[(Range, Arc)]> = - edit_prediction::interpolate_edits(&snapshot, &new_snapshot, &edits)? - .into(); - Some((edits.clone(), new_snapshot, buffer.preview_edits(edits, cx))) - } - })? 
+ .log_err() else { - return anyhow::Ok(None); + return Task::ready(anyhow::Ok(None)); }; - let request_id = Uuid::from_str(&request_id).context("failed to parse request id")?; - - let edit_preview = edit_preview.await; - - Ok(Some(EditPrediction { - id: EditPredictionId(request_id), - path, - excerpt_range: editable_range, - cursor_offset, - edits, - edit_preview, - snapshot, - input_outline: input_outline.into(), - input_events: input_events.into(), - input_excerpt: input_excerpt.into(), - output_excerpt, - buffer_snapshotted_at, - response_received_at: Instant::now(), - })) + let project = project.clone(); + cx.spawn(async move |cx| { + if let Some(prediction) = request_task.await? { + let id = prediction.id.clone(); + this.update(cx, |this, cx| { + let project_state = this + .projects + .get_mut(&project.entity_id()) + .context("Project not found")?; + + let new_prediction = CurrentEditPrediction { + requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), + prediction: prediction, + was_shown: false, + }; + + if project_state + .current_prediction + .as_ref() + .is_none_or(|old_prediction| { + new_prediction.should_replace_prediction(&old_prediction, cx) + }) + { + project_state.current_prediction = Some(new_prediction); + cx.notify(); + } + anyhow::Ok(()) + })??; + Ok(Some(id)) + } else { + Ok(None) + } + }) }) } - fn parse_edits( - output_excerpt: Arc, - editable_range: Range, - snapshot: &BufferSnapshot, - ) -> Result, Arc)>> { - let content = output_excerpt.replace(CURSOR_MARKER, ""); - - let start_markers = content - .match_indices(EDITABLE_REGION_START_MARKER) - .collect::>(); - anyhow::ensure!( - start_markers.len() == 1, - "expected exactly one start marker, found {}", - start_markers.len() - ); + pub fn refresh_prediction_from_diagnostics( + &mut self, + project: Entity, + cx: &mut Context, + ) { + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; - let end_markers = content - 
.match_indices(EDITABLE_REGION_END_MARKER) - .collect::>(); - anyhow::ensure!( - end_markers.len() == 1, - "expected exactly one end marker, found {}", - end_markers.len() - ); - - let sof_markers = content - .match_indices(START_OF_FILE_MARKER) - .collect::>(); - anyhow::ensure!( - sof_markers.len() <= 1, - "expected at most one start-of-file marker, found {}", - sof_markers.len() - ); + // Prefer predictions from buffer + if zeta_project.current_prediction.is_some() { + return; + }; - let codefence_start = start_markers[0].0; - let content = &content[codefence_start..]; + self.queue_prediction_refresh(project.clone(), project.entity_id(), cx, move |this, cx| { + let Some(open_buffer_task) = project + .update(cx, |project, cx| { + project + .active_entry() + .and_then(|entry| project.path_for_entry(entry, cx)) + .map(|path| project.open_buffer(path, cx)) + }) + .log_err() + .flatten() + else { + return Task::ready(anyhow::Ok(None)); + }; - let newline_ix = content.find('\n').context("could not find newline")?; - let content = &content[newline_ix + 1..]; + cx.spawn(async move |cx| { + let active_buffer = open_buffer_task.await?; + let snapshot = active_buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?; + + let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( + active_buffer, + &snapshot, + Default::default(), + Default::default(), + &project, + cx, + ) + .await? + else { + return anyhow::Ok(None); + }; - let codefence_end = content - .rfind(&format!("\n{EDITABLE_REGION_END_MARKER}")) - .context("could not find end marker")?; - let new_text = &content[..codefence_end]; + let Some(prediction) = this + .update(cx, |this, cx| { + this.request_prediction(&project, &jump_buffer, jump_position, cx) + })? + .await? 
+ else { + return anyhow::Ok(None); + }; - let old_text = snapshot - .text_for_range(editable_range.clone()) - .collect::(); + let id = prediction.id.clone(); + this.update(cx, |this, cx| { + if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { + zeta_project.current_prediction.get_or_insert_with(|| { + cx.notify(); + CurrentEditPrediction { + requested_by: PredictionRequestedBy::DiagnosticsUpdate, + prediction, + was_shown: false, + } + }); + } + })?; - Ok(Self::compute_edits( - old_text, - new_text, - editable_range.start, - snapshot, - )) + anyhow::Ok(Some(id)) + }) + }); } - pub fn compute_edits( - old_text: String, - new_text: &str, - offset: usize, - snapshot: &BufferSnapshot, - ) -> Vec<(Range, Arc)> { - text_diff(&old_text, new_text) - .into_iter() - .map(|(mut old_range, new_text)| { - old_range.start += offset; - old_range.end += offset; + #[cfg(not(test))] + pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); + #[cfg(test)] + pub const THROTTLE_TIMEOUT: Duration = Duration::ZERO; - let prefix_len = common_prefix( - snapshot.chars_for_range(old_range.clone()), - new_text.chars(), - ); - old_range.start += prefix_len; + fn queue_prediction_refresh( + &mut self, + project: Entity, + throttle_entity: EntityId, + cx: &mut Context, + do_refresh: impl FnOnce( + WeakEntity, + &mut AsyncApp, + ) -> Task>> + + 'static, + ) { + let zeta_project = self.get_or_init_zeta_project(&project, cx); + let pending_prediction_id = zeta_project.next_pending_prediction_id; + zeta_project.next_pending_prediction_id += 1; + let last_request = zeta_project.last_prediction_refresh; - let suffix_len = common_prefix( - snapshot.reversed_chars_for_range(old_range.clone()), - new_text[prefix_len..].chars().rev(), - ); - old_range.end = old_range.end.saturating_sub(suffix_len); + // TODO report cancelled requests like in zeta1 + let task = cx.spawn(async move |this, cx| { + if let Some((last_entity, last_timestamp)) = last_request + && throttle_entity 
== last_entity + && let Some(timeout) = + (last_timestamp + Self::THROTTLE_TIMEOUT).checked_duration_since(Instant::now()) + { + cx.background_executor().timer(timeout).await; + } - let new_text = new_text[prefix_len..new_text.len() - suffix_len].into(); - let range = if old_range.is_empty() { - let anchor = snapshot.anchor_after(old_range.start); - anchor..anchor - } else { - snapshot.anchor_after(old_range.start)..snapshot.anchor_before(old_range.end) - }; - (range, new_text) + let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); + + // When a prediction completes, remove it from the pending list, and cancel + // any pending predictions that were enqueued before it. + this.update(cx, |this, cx| { + let zeta_project = this.get_or_init_zeta_project(&project, cx); + let mut pending_predictions = mem::take(&mut zeta_project.pending_predictions); + for (ix, pending_prediction) in pending_predictions.iter().enumerate() { + if pending_prediction.id == pending_prediction_id { + pending_predictions.remove(ix); + for pending_prediction in pending_predictions.drain(0..ix) { + this.cancel_pending_prediction(pending_prediction, cx) + } + break; + } + } + this.get_or_init_zeta_project(&project, cx) + .pending_predictions = pending_predictions; + cx.notify(); }) - .collect() - } + .ok(); - pub fn is_completion_rated(&self, completion_id: EditPredictionId) -> bool { - self.rated_completions.contains(&completion_id) - } + edit_prediction_id + }); - pub fn completion_shown(&mut self, completion: &EditPrediction, cx: &mut Context) { - self.shown_completions.push_front(completion.clone()); - if self.shown_completions.len() > 50 { - let completion = self.shown_completions.pop_back().unwrap(); - self.rated_completions.remove(&completion.id); + if zeta_project.pending_predictions.len() <= 1 { + zeta_project.pending_predictions.push(PendingPrediction { + id: pending_prediction_id, + task, + }); + } else if zeta_project.pending_predictions.len() == 2 { + let 
pending_prediction = zeta_project.pending_predictions.pop().unwrap(); + zeta_project.pending_predictions.push(PendingPrediction { + id: pending_prediction_id, + task, + }); + self.cancel_pending_prediction(pending_prediction, cx); } - cx.notify(); } - pub fn rate_completion( + pub fn request_prediction( &mut self, - completion: &EditPrediction, - rating: EditPredictionRating, - feedback: String, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, cx: &mut Context, - ) { - self.rated_completions.insert(completion.id); - telemetry::event!( - "Edit Prediction Rated", - rating, - input_events = completion.input_events, - input_excerpt = completion.input_excerpt, - input_outline = completion.input_outline, - output_excerpt = completion.output_excerpt, - feedback - ); - self.client.telemetry().flush_events().detach(); - cx.notify(); - } - - pub fn shown_completions(&self) -> impl DoubleEndedIterator { - self.shown_completions.iter() - } - - pub fn shown_completions_len(&self) -> usize { - self.shown_completions.len() + ) -> Task>> { + match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 => { + request_prediction_with_zeta1(self, project, active_buffer, position, cx) + } + ZetaEditPredictionModel::Zeta2 => { + self.request_prediction_with_zeta2(project, active_buffer, position, cx) + } + ZetaEditPredictionModel::Sweep => { + self.request_prediction_with_sweep(project, active_buffer, position, true, cx) + } + } } - fn report_changes_for_buffer( + fn request_prediction_with_sweep( &mut self, - buffer: &Entity, project: &Entity, + active_buffer: &Entity, + position: language::Anchor, + allow_jump: bool, cx: &mut Context, - ) -> BufferSnapshot { - let zeta_project = self.get_or_init_zeta_project(project, cx); - let registered_buffer = Self::register_buffer_impl(zeta_project, buffer, project, cx); + ) -> Task>> { + let snapshot = active_buffer.read(cx).snapshot(); + let debug_info = self.sweep_ai_debug_info.clone(); + let Some(api_token) 
= self.sweep_api_token.clone() else { + return Task::ready(Ok(None)); + }; + let full_path: Arc = snapshot + .file() + .map(|file| file.full_path(cx)) + .unwrap_or_else(|| "untitled".into()) + .into(); + + let project_file = project::File::from_dyn(snapshot.file()); + let repo_name = project_file + .map(|file| file.worktree.read(cx).root_name_str()) + .unwrap_or("untitled") + .into(); + let offset = position.to_offset(&snapshot); + + let project_state = self.get_or_init_zeta_project(project, cx); + let events = project_state.events(cx); + let has_events = !events.is_empty(); + let recent_buffers = project_state.recent_paths.iter().cloned(); + let http_client = cx.http_client(); + + let recent_buffer_snapshots = recent_buffers + .filter_map(|project_path| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } + }) + .take(3) + .collect::>(); - let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version != registered_buffer.snapshot.version { - let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - Self::push_event( - zeta_project, - Event::BufferChange { - old_snapshot, - new_snapshot: new_snapshot.clone(), - timestamp: Instant::now(), - }, - ); - } + const DIAGNOSTIC_LINES_RANGE: u32 = 20; - new_snapshot - } + let cursor_point = position.to_point(&snapshot); + let diagnostic_search_start = cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); + let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; + let diagnostic_search_range = + Point::new(diagnostic_search_start, 0)..Point::new(diagnostic_search_end, 0); + let buffer_snapshotted_at = Instant::now(); - fn can_collect_file(&self, file: &Arc, cx: &App) -> bool { - self.data_collection_choice.is_enabled() && self.is_file_open_source(file, cx) - } + let result = cx.background_spawn({ + let snapshot = snapshot.clone(); + let diagnostic_search_range 
= diagnostic_search_range.clone(); + async move { + let text = snapshot.text(); - fn can_collect_events(&self, events: &[Event], cx: &App) -> bool { - if !self.data_collection_choice.is_enabled() { - return false; - } - let mut last_checked_file = None; - for event in events { - match event { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. - } => { - if let Some(old_file) = old_snapshot.file() - && let Some(new_file) = new_snapshot.file() - { - if let Some(last_checked_file) = last_checked_file - && Arc::ptr_eq(last_checked_file, old_file) - && Arc::ptr_eq(last_checked_file, new_file) - { - continue; - } - if !self.can_collect_file(old_file, cx) { - return false; - } - if !Arc::ptr_eq(old_file, new_file) && !self.can_collect_file(new_file, cx) - { - return false; + let mut recent_changes = String::new(); + for event in &events { + sweep_ai::write_event(event.as_ref(), &mut recent_changes).unwrap(); + } + + let mut file_chunks = recent_buffer_snapshots + .into_iter() + .map(|snapshot| { + let end_point = Point::new(30, 0).min(snapshot.max_point()); + sweep_ai::FileChunk { + content: snapshot.text_for_range(Point::zero()..end_point).collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + .to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? + .to_seconds_and_nanos_for_persistence()? 
+ .0, + ) + }), } - last_checked_file = Some(new_file); - } else { - return false; - } + }) + .collect::>(); + + let diagnostic_entries = + snapshot.diagnostics_in_range(diagnostic_search_range, false); + let mut diagnostic_content = String::new(); + let mut diagnostic_count = 0; + + for entry in diagnostic_entries { + let start_point: Point = entry.range.start; + + let severity = match entry.diagnostic.severity { + DiagnosticSeverity::ERROR => "error", + DiagnosticSeverity::WARNING => "warning", + DiagnosticSeverity::INFORMATION => "info", + DiagnosticSeverity::HINT => "hint", + _ => continue, + }; + + diagnostic_count += 1; + + writeln!( + &mut diagnostic_content, + "{} at line {}: {}", + severity, + start_point.row + 1, + entry.diagnostic.message + )?; + } + + if !diagnostic_content.is_empty() { + file_chunks.push(sweep_ai::FileChunk { + file_path: format!("Diagnostics for {}", full_path.display()), + start_line: 0, + end_line: diagnostic_count, + content: diagnostic_content, + timestamp: None, + }); + } + + let request_body = sweep_ai::AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; + + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); + + let inputs = EditPredictionInputs { + events, + included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { + path: full_path.clone(), + max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), + excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { + start_line: 
cloud_llm_client::predict_edits_v3::Line(0), + text: request_body.file_contents.into(), + }], + }], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + column: cursor_point.column, + line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), + }, + cursor_path: full_path.clone(), + }; + + const SWEEP_API_URL: &str = + "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", api_token)) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; + + let mut response = http_client.send(request).await?; + + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + + let response_received_at = Instant::now(); + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; + + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = language::text_diff(&old_text, &response.completion) + .into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok(( + response.autocomplete_id, + edits, + snapshot, + response_received_at, + inputs, + )) + } + }); + + let buffer = active_buffer.clone(); + let project = project.clone(); + let active_buffer = active_buffer.clone(); + + cx.spawn(async move |this, cx| { + let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; + + if edits.is_empty() { + if has_events + && allow_jump + && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( + 
active_buffer, + &snapshot, + diagnostic_search_range, + cursor_point, + &project, + cx, + ) + .await? + { + return this + .update(cx, |this, cx| { + this.request_prediction_with_sweep( + &project, + &jump_buffer, + jump_position, + false, + cx, + ) + })? + .await; } + + return anyhow::Ok(None); } - } - true - } - fn is_file_open_source(&self, file: &Arc, cx: &App) -> bool { - if !file.is_local() || file.is_private() { - return false; - } - self.license_detection_watchers - .get(&file.worktree_id(cx)) - .is_some_and(|watcher| watcher.is_project_open_source()) + anyhow::Ok( + EditPrediction::new( + EditPredictionId(id.into()), + &buffer, + &old_snapshot, + edits.into(), + buffer_snapshotted_at, + response_received_at, + inputs, + cx, + ) + .await, + ) + }) } - fn load_data_collection_choice() -> DataCollectionChoice { - let choice = KEY_VALUE_STORE - .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) - .log_err() - .flatten(); + async fn next_diagnostic_location( + active_buffer: Entity, + active_buffer_snapshot: &BufferSnapshot, + active_buffer_diagnostic_search_range: Range, + active_buffer_cursor_point: Point, + project: &Entity, + cx: &mut AsyncApp, + ) -> Result, language::Anchor)>> { + // find the closest diagnostic to the cursor that wasn't close enough to be included in the last request + let mut jump_location = active_buffer_snapshot + .diagnostic_groups(None) + .into_iter() + .filter_map(|(_, group)| { + let range = &group.entries[group.primary_ix] + .range + .to_point(&active_buffer_snapshot); + if range.overlaps(&active_buffer_diagnostic_search_range) { + None + } else { + Some(range.start) + } + }) + .min_by_key(|probe| probe.row.abs_diff(active_buffer_cursor_point.row)) + .map(|position| { + ( + active_buffer.clone(), + active_buffer_snapshot.anchor_before(position), + ) + }); - match choice.as_deref() { - Some("true") => DataCollectionChoice::Enabled, - Some("false") => DataCollectionChoice::Disabled, - Some(_) => { - log::error!("unknown value in 
'{ZED_PREDICT_DATA_COLLECTION_CHOICE}'"); - DataCollectionChoice::NotAnswered + if jump_location.is_none() { + let active_buffer_path = active_buffer.read_with(cx, |buffer, cx| { + let file = buffer.file()?; + + Some(ProjectPath { + worktree_id: file.worktree_id(cx), + path: file.path().clone(), + }) + })?; + + let buffer_task = project.update(cx, |project, cx| { + let (path, _, _) = project + .diagnostic_summaries(false, cx) + .filter(|(path, _, _)| Some(path) != active_buffer_path.as_ref()) + .max_by_key(|(path, _, _)| { + // find the buffer with errors that shares most parent directories + path.path + .components() + .zip( + active_buffer_path + .as_ref() + .map(|p| p.path.components()) + .unwrap_or_default(), + ) + .take_while(|(a, b)| a == b) + .count() + })?; + + Some(project.open_buffer(path, cx)) + })?; + + if let Some(buffer_task) = buffer_task { + let closest_buffer = buffer_task.await?; + + jump_location = closest_buffer + .read_with(cx, |buffer, _cx| { + buffer + .buffer_diagnostics(None) + .into_iter() + .min_by_key(|entry| entry.diagnostic.severity) + .map(|entry| entry.range.start) + })? 
+ .map(|position| (closest_buffer, position)); } - None => DataCollectionChoice::NotAnswered, } - } - fn toggle_data_collection_choice(&mut self, cx: &mut Context) { - self.data_collection_choice = self.data_collection_choice.toggle(); - let new_choice = self.data_collection_choice; - db::write_and_log(cx, move || { - KEY_VALUE_STORE.write_kvp( - ZED_PREDICT_DATA_COLLECTION_CHOICE.into(), - new_choice.is_enabled().to_string(), - ) - }); + anyhow::Ok(jump_location) } - fn discard_completion( + fn request_prediction_with_zeta2( &mut self, - completion_id: EditPredictionId, - was_shown: bool, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, cx: &mut Context, - ) { - self.discarded_completions.push(EditPredictionRejection { - request_id: completion_id.to_string(), - was_shown, - }); + ) -> Task>> { + let project_state = self.projects.get(&project.entity_id()); - let reached_request_limit = - self.discarded_completions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; - let discard_completions_tx = self.discard_completions_tx.clone(); - self.discard_completions_debounce_task = Some(cx.spawn(async move |_this, cx| { - const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); - if !reached_request_limit { - cx.background_executor() - .timer(DISCARD_COMPLETIONS_DEBOUNCE) - .await; - } - discard_completions_tx.unbounded_send(()).log_err(); - })); - } -} + let index_state = project_state.and_then(|state| { + state + .syntax_index + .as_ref() + .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) + }); + let options = self.options.clone(); + let active_snapshot = active_buffer.read(cx).snapshot(); + let buffer_snapshotted_at = Instant::now(); + let Some(excerpt_path) = active_snapshot + .file() + .map(|path| -> Arc { path.full_path(cx).into() }) + else { + return Task::ready(Err(anyhow!("No file path for excerpt"))); + }; + let client = self.client.clone(); + let llm_token = self.llm_token.clone(); + 
let app_version = AppVersion::global(cx); + let worktree_snapshots = project + .read(cx) + .worktrees(cx) + .map(|worktree| worktree.read(cx).snapshot()) + .collect::>(); + let debug_tx = self.debug_tx.clone(); -pub struct PerformPredictEditsParams { - pub client: Arc, - pub llm_token: LlmApiToken, - pub app_version: Version, - pub body: PredictEditsBody, -} + let events = project_state + .map(|state| state.events(cx)) + .unwrap_or_default(); -#[derive(Error, Debug)] -#[error( - "You must update to Zed version {minimum_version} or higher to continue using edit predictions." -)] -pub struct ZedUpdateRequiredError { - minimum_version: Version, -} + let diagnostics = active_snapshot.diagnostic_sets().clone(); -fn common_prefix, T2: Iterator>(a: T1, b: T2) -> usize { - a.zip(b) - .take_while(|(a, b)| a == b) - .map(|(a, _)| a.len_utf8()) - .sum() -} + let file = active_buffer.read(cx).file(); + let parent_abs_path = project::File::from_dyn(file).and_then(|f| { + let mut path = f.worktree.read(cx).absolutize(&f.path); + if path.pop() { Some(path) } else { None } + }); -fn git_info_for_file( - project: &Entity, - project_path: &ProjectPath, - cx: &App, -) -> Option { - let git_store = project.read(cx).git_store().read(cx); - if let Some((repository, _repo_path)) = - git_store.repository_and_path_for_project_path(project_path, cx) - { - let repository = repository.read(cx); - let head_sha = repository - .head_commit + // TODO data collection + let can_collect_data = file .as_ref() - .map(|head_commit| head_commit.sha.to_string()); - let remote_origin_url = repository.remote_origin_url.clone(); - let remote_upstream_url = repository.remote_upstream_url.clone(); - if head_sha.is_none() && remote_origin_url.is_none() && remote_upstream_url.is_none() { - return None; - } - Some(PredictEditsGitInfo { - head_sha, - remote_origin_url, - remote_upstream_url, - }) - } else { - None - } -} + .map_or(false, |file| self.can_collect_file(project, file, cx)); + + let 
empty_context_files = HashMap::default(); + let context_files = project_state + .and_then(|project_state| project_state.context.as_ref()) + .unwrap_or(&empty_context_files); + + #[cfg(feature = "eval-support")] + let parsed_fut = futures::future::join_all( + context_files + .keys() + .map(|buffer| buffer.read(cx).parsing_idle()), + ); -pub struct GatherContextOutput { - pub body: PredictEditsBody, - pub editable_range: Range, - pub included_events_count: usize, -} + let mut included_files = context_files + .iter() + .filter_map(|(buffer_entity, ranges)| { + let buffer = buffer_entity.read(cx); + Some(( + buffer_entity.clone(), + buffer.snapshot(), + buffer.file()?.full_path(cx).into(), + ranges.clone(), + )) + }) + .collect::>(); -pub fn gather_context( - full_path_str: String, - snapshot: &BufferSnapshot, - cursor_point: language::Point, - prompt_for_events: impl FnOnce() -> (String, usize) + Send + 'static, - cx: &App, -) -> Task> { - cx.background_spawn({ - let snapshot = snapshot.clone(); - async move { - let input_excerpt = excerpt_for_cursor_position( - cursor_point, - &full_path_str, - &snapshot, - MAX_REWRITE_TOKENS, - MAX_CONTEXT_TOKENS, - ); - let (input_events, included_events_count) = prompt_for_events(); - let editable_range = input_excerpt.editable_range.to_offset(&snapshot); - - let body = PredictEditsBody { - input_events, - input_excerpt: input_excerpt.prompt, - can_collect_data: false, - diagnostic_groups: None, - git_info: None, - outline: None, - speculated_output: None, - }; + included_files.sort_by(|(_, _, path_a, ranges_a), (_, _, path_b, ranges_b)| { + (path_a, ranges_a.len()).cmp(&(path_b, ranges_b.len())) + }); - Ok(GatherContextOutput { - body, - editable_range, - included_events_count, - }) - } - }) -} + #[cfg(feature = "eval-support")] + let eval_cache = self.eval_cache.clone(); -fn prompt_for_events_impl(events: &[Event], mut remaining_tokens: usize) -> (String, usize) { - let mut result = String::new(); - for (ix, event) in 
events.iter().rev().enumerate() { - let event_string = event.to_prompt(); - let event_tokens = guess_token_count(event_string.len()); - if event_tokens > remaining_tokens { - return (result, ix); - } + let request_task = cx.background_spawn({ + let active_buffer = active_buffer.clone(); + async move { + #[cfg(feature = "eval-support")] + parsed_fut.await; - if !result.is_empty() { - result.insert_str(0, "\n\n"); - } - result.insert_str(0, &event_string); - remaining_tokens -= event_tokens; - } - return (result, events.len()); -} + let index_state = if let Some(index_state) = index_state { + Some(index_state.lock_owned().await) + } else { + None + }; -struct RegisteredBuffer { - snapshot: BufferSnapshot, - _subscriptions: [gpui::Subscription; 2], -} + let cursor_offset = position.to_offset(&active_snapshot); + let cursor_point = cursor_offset.to_point(&active_snapshot); -#[derive(Clone)] -pub enum Event { - BufferChange { - old_snapshot: BufferSnapshot, - new_snapshot: BufferSnapshot, - timestamp: Instant, - }, -} + let before_retrieval = Instant::now(); -impl Event { - fn to_prompt(&self) -> String { - match self { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. 
- } => { - let mut prompt = String::new(); - - let old_path = old_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - let new_path = new_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - if old_path != new_path { - writeln!(prompt, "User renamed {:?} to {:?}\n", old_path, new_path).unwrap(); + let (diagnostic_groups, diagnostic_groups_truncated) = + Self::gather_nearby_diagnostics( + cursor_offset, + &diagnostics, + &active_snapshot, + options.max_diagnostic_bytes, + ); + + let cloud_request = match options.context { + ContextMode::Agentic(context_options) => { + let Some(excerpt) = EditPredictionExcerpt::select_from_buffer( + cursor_point, + &active_snapshot, + &context_options.excerpt, + index_state.as_deref(), + ) else { + return Ok((None, None)); + }; + + let excerpt_anchor_range = active_snapshot.anchor_after(excerpt.range.start) + ..active_snapshot.anchor_before(excerpt.range.end); + + if let Some(buffer_ix) = + included_files.iter().position(|(_, snapshot, _, _)| { + snapshot.remote_id() == active_snapshot.remote_id() + }) + { + let (_, buffer, _, ranges) = &mut included_files[buffer_ix]; + ranges.push(excerpt_anchor_range); + retrieval_search::merge_anchor_ranges(ranges, buffer); + let last_ix = included_files.len() - 1; + included_files.swap(buffer_ix, last_ix); + } else { + included_files.push(( + active_buffer.clone(), + active_snapshot.clone(), + excerpt_path.clone(), + vec![excerpt_anchor_range], + )); + } + + let included_files = included_files + .iter() + .map(|(_, snapshot, path, ranges)| { + let ranges = ranges + .iter() + .map(|range| { + let point_range = range.to_point(&snapshot); + Line(point_range.start.row)..Line(point_range.end.row) + }) + .collect::>(); + let excerpts = assemble_excerpts(&snapshot, ranges); + predict_edits_v3::IncludedFile { + path: path.clone(), + max_row: Line(snapshot.max_point().row), + excerpts, + } + }) + .collect::>(); 
+ + predict_edits_v3::PredictEditsRequest { + excerpt_path, + excerpt: String::new(), + excerpt_line_range: Line(0)..Line(0), + excerpt_range: 0..0, + cursor_point: predict_edits_v3::Point { + line: predict_edits_v3::Line(cursor_point.row), + column: cursor_point.column, + }, + included_files, + referenced_declarations: vec![], + events, + can_collect_data, + diagnostic_groups, + diagnostic_groups_truncated, + debug_info: debug_tx.is_some(), + prompt_max_bytes: Some(options.max_prompt_bytes), + prompt_format: options.prompt_format, + // TODO [zeta2] + signatures: vec![], + excerpt_parent: None, + git_info: None, + } + } + ContextMode::Syntax(context_options) => { + let Some(context) = EditPredictionContext::gather_context( + cursor_point, + &active_snapshot, + parent_abs_path.as_deref(), + &context_options, + index_state.as_deref(), + ) else { + return Ok((None, None)); + }; + + make_syntax_context_cloud_request( + excerpt_path, + context, + events, + can_collect_data, + diagnostic_groups, + diagnostic_groups_truncated, + None, + debug_tx.is_some(), + &worktree_snapshots, + index_state.as_deref(), + Some(options.max_prompt_bytes), + options.prompt_format, + ) + } + }; + + let prompt_result = cloud_zeta2_prompt::build_prompt(&cloud_request); + + let inputs = EditPredictionInputs { + included_files: cloud_request.included_files, + events: cloud_request.events, + cursor_point: cloud_request.cursor_point, + cursor_path: cloud_request.excerpt_path, + }; + + let retrieval_time = Instant::now() - before_retrieval; + + let debug_response_tx = if let Some(debug_tx) = &debug_tx { + let (response_tx, response_rx) = oneshot::channel(); + + debug_tx + .unbounded_send(ZetaDebugInfo::EditPredictionRequested( + ZetaEditPredictionDebugInfo { + inputs: inputs.clone(), + retrieval_time, + buffer: active_buffer.downgrade(), + local_prompt: match prompt_result.as_ref() { + Ok((prompt, _)) => Ok(prompt.clone()), + Err(err) => Err(err.to_string()), + }, + position, + response_rx, + }, + 
)) + .ok(); + Some(response_tx) + } else { + None + }; + + if cfg!(debug_assertions) && env::var("ZED_ZETA2_SKIP_REQUEST").is_ok() { + if let Some(debug_response_tx) = debug_response_tx { + debug_response_tx + .send((Err("Request skipped".to_string()), Duration::ZERO)) + .ok(); + } + anyhow::bail!("Skipping request because ZED_ZETA2_SKIP_REQUEST is set") } - let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); - if !diff.is_empty() { - write!( - prompt, - "User edited {:?}:\n```diff\n{}\n```", - new_path, diff - ) - .unwrap(); + let (prompt, _) = prompt_result?; + let generation_params = + cloud_zeta2_prompt::generation_params(cloud_request.prompt_format); + let request = open_ai::Request { + model: EDIT_PREDICTIONS_MODEL_ID.clone(), + messages: vec![open_ai::RequestMessage::User { + content: open_ai::MessageContent::Plain(prompt), + }], + stream: false, + max_completion_tokens: None, + stop: generation_params.stop.unwrap_or_default(), + temperature: generation_params.temperature.unwrap_or(0.7), + tool_choice: None, + parallel_tool_calls: None, + tools: vec![], + prompt_cache_key: None, + reasoning_effort: None, + }; + + log::trace!("Sending edit prediction request"); + + let before_request = Instant::now(); + let response = Self::send_raw_llm_request( + request, + client, + llm_token, + app_version, + #[cfg(feature = "eval-support")] + eval_cache, + #[cfg(feature = "eval-support")] + EvalCacheEntryKind::Prediction, + ) + .await; + let received_response_at = Instant::now(); + let request_time = received_response_at - before_request; + + log::trace!("Got edit prediction response"); + + if let Some(debug_response_tx) = debug_response_tx { + debug_response_tx + .send(( + response + .as_ref() + .map_err(|err| err.to_string()) + .map(|response| response.0.clone()), + request_time, + )) + .ok(); } - prompt + let (res, usage) = response?; + let request_id = EditPredictionId(res.id.clone().into()); + let Some(mut output_text) = 
text_from_response(res) else { + return Ok((None, usage)); + }; + + if output_text.contains(CURSOR_MARKER) { + log::trace!("Stripping out {CURSOR_MARKER} from response"); + output_text = output_text.replace(CURSOR_MARKER, ""); + } + + let get_buffer_from_context = |path: &Path| { + included_files + .iter() + .find_map(|(_, buffer, probe_path, ranges)| { + if probe_path.as_ref() == path { + Some((buffer, ranges.as_slice())) + } else { + None + } + }) + }; + + let (edited_buffer_snapshot, edits) = match options.prompt_format { + PromptFormat::NumLinesUniDiff => { + // TODO: Implement parsing of multi-file diffs + crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? + } + PromptFormat::Minimal + | PromptFormat::MinimalQwen + | PromptFormat::SeedCoder1120 => { + if output_text.contains("--- a/\n+++ b/\nNo edits") { + let edits = vec![]; + (&active_snapshot, edits) + } else { + crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? + } + } + PromptFormat::OldTextNewText => { + crate::xml_edits::parse_xml_edits(&output_text, get_buffer_from_context) + .await? + } + _ => { + bail!("unsupported prompt format {}", options.prompt_format) + } + }; + + let edited_buffer = included_files + .iter() + .find_map(|(buffer, snapshot, _, _)| { + if snapshot.remote_id() == edited_buffer_snapshot.remote_id() { + Some(buffer.clone()) + } else { + None + } + }) + .context("Failed to find buffer in included_buffers")?; + + anyhow::Ok(( + Some(( + request_id, + inputs, + edited_buffer, + edited_buffer_snapshot.clone(), + edits, + received_response_at, + )), + usage, + )) } - } - } -} + }); -#[derive(Debug, Clone)] -struct CurrentEditPrediction { - buffer_id: EntityId, - completion: EditPrediction, - was_shown: bool, - was_accepted: bool, -} + cx.spawn({ + async move |this, cx| { + let Some(( + id, + inputs, + edited_buffer, + edited_buffer_snapshot, + edits, + received_response_at, + )) = Self::handle_api_response(&this, request_task.await, cx)? 
+ else { + return Ok(None); + }; -impl CurrentEditPrediction { - fn should_replace_completion(&self, old_completion: &Self, snapshot: &BufferSnapshot) -> bool { - if self.buffer_id != old_completion.buffer_id { - return true; - } + // TODO telemetry: duration, etc + Ok(EditPrediction::new( + id, + &edited_buffer, + &edited_buffer_snapshot, + edits.into(), + buffer_snapshotted_at, + received_response_at, + inputs, + cx, + ) + .await) + } + }) + } - let Some(old_edits) = old_completion.completion.interpolate(snapshot) else { - return true; - }; - let Some(new_edits) = self.completion.interpolate(snapshot) else { - return false; + async fn send_raw_llm_request( + request: open_ai::Request, + client: Arc, + llm_token: LlmApiToken, + app_version: Version, + #[cfg(feature = "eval-support")] eval_cache: Option>, + #[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind, + ) -> Result<(open_ai::Response, Option)> { + let url = if let Some(predict_edits_url) = PREDICT_EDITS_URL.as_ref() { + http_client::Url::parse(&predict_edits_url)? + } else { + client + .http_client() + .build_zed_llm_url("/predict_edits/raw", &[])? 
}; - if old_edits.len() == 1 && new_edits.len() == 1 { - let (old_range, old_text) = &old_edits[0]; - let (new_range, new_text) = &new_edits[0]; - new_range == old_range && new_text.starts_with(old_text.as_ref()) - } else { - true - } - } -} + #[cfg(feature = "eval-support")] + let cache_key = if let Some(cache) = eval_cache { + use collections::FxHasher; + use std::hash::{Hash, Hasher}; -struct PendingCompletion { - id: usize, - task: Task<()>, -} + let mut hasher = FxHasher::default(); + url.hash(&mut hasher); + let request_str = serde_json::to_string_pretty(&request)?; + request_str.hash(&mut hasher); + let hash = hasher.finish(); -#[derive(Debug, Clone, Copy)] -pub enum DataCollectionChoice { - NotAnswered, - Enabled, - Disabled, -} + let key = (eval_cache_kind, hash); + if let Some(response_str) = cache.read(key) { + return Ok((serde_json::from_str(&response_str)?, None)); + } -impl DataCollectionChoice { - pub fn is_enabled(self) -> bool { - match self { - Self::Enabled => true, - Self::NotAnswered | Self::Disabled => false, - } - } + Some((cache, request_str, key)) + } else { + None + }; - pub fn is_answered(self) -> bool { - match self { - Self::Enabled | Self::Disabled => true, - Self::NotAnswered => false, + let (response, usage) = Self::send_api_request( + |builder| { + let req = builder + .uri(url.as_ref()) + .body(serde_json::to_string(&request)?.into()); + Ok(req?) 
+ }, + client, + llm_token, + app_version, + ) + .await?; + + #[cfg(feature = "eval-support")] + if let Some((cache, request, key)) = cache_key { + cache.write(key, &request, &serde_json::to_string_pretty(&response)?); } + + Ok((response, usage)) } - #[must_use] - pub fn toggle(&self) -> DataCollectionChoice { - match self { - Self::Enabled => Self::Disabled, - Self::Disabled => Self::Enabled, - Self::NotAnswered => Self::Enabled, + fn handle_api_response( + this: &WeakEntity, + response: Result<(T, Option)>, + cx: &mut gpui::AsyncApp, + ) -> Result { + match response { + Ok((data, usage)) => { + if let Some(usage) = usage { + this.update(cx, |this, cx| { + this.user_store.update(cx, |user_store, cx| { + user_store.update_edit_prediction_usage(usage, cx); + }); + }) + .ok(); + } + Ok(data) + } + Err(err) => { + if err.is::() { + cx.update(|cx| { + this.update(cx, |this, _cx| { + this.update_required = true; + }) + .ok(); + + let error_message: SharedString = err.to_string().into(); + show_app_notification( + NotificationId::unique::(), + cx, + move |cx| { + cx.new(|cx| { + ErrorMessagePrompt::new(error_message.clone(), cx) + .with_link_button("Update Zed", "https://zed.dev/releases") + }) + }, + ); + }) + .ok(); + } + Err(err) + } } } -} -impl From for DataCollectionChoice { - fn from(value: bool) -> Self { - match value { - true => DataCollectionChoice::Enabled, - false => DataCollectionChoice::Disabled, + async fn send_api_request( + build: impl Fn(http_client::http::request::Builder) -> Result>, + client: Arc, + llm_token: LlmApiToken, + app_version: Version, + ) -> Result<(Res, Option)> + where + Res: DeserializeOwned, + { + let http_client = client.http_client(); + let mut token = llm_token.acquire(&client).await?; + let mut did_retry = false; + + loop { + let request_builder = http_client::Request::builder().method(Method::POST); + + let request = build( + request_builder + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer 
{}", token)) + .header(ZED_VERSION_HEADER_NAME, app_version.to_string()), + )?; + + let mut response = http_client.send(request).await?; + + if let Some(minimum_required_version) = response + .headers() + .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) + .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) + { + anyhow::ensure!( + app_version >= minimum_required_version, + ZedUpdateRequiredError { + minimum_version: minimum_required_version + } + ); + } + + if response.status().is_success() { + let usage = EditPredictionUsage::from_headers(response.headers()).ok(); + + let mut body = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + return Ok((serde_json::from_slice(&body)?, usage)); + } else if !did_retry + && response + .headers() + .get(EXPIRED_LLM_TOKEN_HEADER_NAME) + .is_some() + { + did_retry = true; + token = llm_token.refresh(&client).await?; + } else { + let mut body = String::new(); + response.body_mut().read_to_string(&mut body).await?; + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + body + ); + } } } -} -async fn llm_token_retry( - llm_token: &LlmApiToken, - client: &Arc, - build_request: impl Fn(String) -> Result>, -) -> Result> { - let mut did_retry = false; - let http_client = client.http_client(); - let mut token = llm_token.acquire(client).await?; - loop { - let request = build_request(token.clone())?; - let response = http_client.send(request).await?; - - if !did_retry - && !response.status().is_success() - && response - .headers() - .get(EXPIRED_LLM_TOKEN_HEADER_NAME) - .is_some() - { - did_retry = true; - token = llm_token.refresh(client).await?; - continue; + pub const CONTEXT_RETRIEVAL_IDLE_DURATION: Duration = Duration::from_secs(10); + pub const CONTEXT_RETRIEVAL_DEBOUNCE_DURATION: Duration = Duration::from_secs(3); + + // Refresh the related excerpts when the user just beguns editing after + // an idle period, and after they pause editing. 
+ fn refresh_context_if_needed( + &mut self, + project: &Entity, + buffer: &Entity, + cursor_position: language::Anchor, + cx: &mut Context, + ) { + if !matches!(&self.options().context, ContextMode::Agentic { .. }) { + return; } - return Ok(response); - } -} + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; -pub struct ZetaEditPredictionProvider { - zeta: Entity, - singleton_buffer: Option>, - pending_completions: ArrayVec, - canceled_completions: HashMap>, - next_pending_completion_id: usize, - current_completion: Option, - last_request_timestamp: Instant, - project: Entity, -} + let now = Instant::now(); + let was_idle = zeta_project + .refresh_context_timestamp + .map_or(true, |timestamp| { + now - timestamp > Self::CONTEXT_RETRIEVAL_IDLE_DURATION + }); + zeta_project.refresh_context_timestamp = Some(now); + zeta_project.refresh_context_debounce_task = Some(cx.spawn({ + let buffer = buffer.clone(); + let project = project.clone(); + async move |this, cx| { + if was_idle { + log::debug!("refetching edit prediction context after idle"); + } else { + cx.background_executor() + .timer(Self::CONTEXT_RETRIEVAL_DEBOUNCE_DURATION) + .await; + log::debug!("refetching edit prediction context after pause"); + } + this.update(cx, |this, cx| { + let task = this.refresh_context(project.clone(), buffer, cursor_position, cx); -impl ZetaEditPredictionProvider { - pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); + if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { + zeta_project.refresh_context_task = Some(task.log_err()); + }; + }) + .ok() + } + })); + } - pub fn new( - zeta: Entity, + // Refresh the related excerpts asynchronously. Ensure the task runs to completion, + // and avoid spawning more than one concurrent task. 
+ pub fn refresh_context( + &mut self, project: Entity, - singleton_buffer: Option>, + buffer: Entity, + cursor_position: language::Anchor, cx: &mut Context, - ) -> Self { - cx.on_release(|this, cx| { - this.take_current_edit_prediction(cx); - }) - .detach(); + ) -> Task> { + let Some(zeta_project) = self.projects.get(&project.entity_id()) else { + return Task::ready(anyhow::Ok(())); + }; - Self { - zeta, - singleton_buffer, - pending_completions: ArrayVec::new(), - canceled_completions: HashMap::default(), - next_pending_completion_id: 0, - current_completion: None, - last_request_timestamp: Instant::now(), - project, + let ContextMode::Agentic(options) = &self.options().context else { + return Task::ready(anyhow::Ok(())); + }; + + let snapshot = buffer.read(cx).snapshot(); + let cursor_point = cursor_position.to_point(&snapshot); + let Some(cursor_excerpt) = EditPredictionExcerpt::select_from_buffer( + cursor_point, + &snapshot, + &options.excerpt, + None, + ) else { + return Task::ready(Ok(())); + }; + + let app_version = AppVersion::global(cx); + let client = self.client.clone(); + let llm_token = self.llm_token.clone(); + let debug_tx = self.debug_tx.clone(); + let current_file_path: Arc = snapshot + .file() + .map(|f| f.full_path(cx).into()) + .unwrap_or_else(|| Path::new("untitled").into()); + + let prompt = match cloud_zeta2_prompt::retrieval_prompt::build_prompt( + predict_edits_v3::PlanContextRetrievalRequest { + excerpt: cursor_excerpt.text(&snapshot).body, + excerpt_path: current_file_path, + excerpt_line_range: cursor_excerpt.line_range, + cursor_file_max_row: Line(snapshot.max_point().row), + events: zeta_project.events(cx), + }, + ) { + Ok(prompt) => prompt, + Err(err) => { + return Task::ready(Err(err)); + } + }; + + if let Some(debug_tx) = &debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::ContextRetrievalStarted( + ZetaContextRetrievalStartedDebugInfo { + project: project.clone(), + timestamp: Instant::now(), + search_prompt: prompt.clone(), 
+ }, + )) + .ok(); } - } - fn take_current_edit_prediction(&mut self, cx: &mut App) { - if let Some(completion) = self.current_completion.take() { - if !completion.was_accepted { - self.zeta.update(cx, |zeta, cx| { - zeta.discard_completion(completion.completion.id, completion.was_shown, cx); - }); + pub static TOOL_SCHEMA: LazyLock<(serde_json::Value, String)> = LazyLock::new(|| { + let schema = language_model::tool_schema::root_schema_for::( + language_model::LanguageModelToolSchemaFormat::JsonSchemaSubset, + ); + + let description = schema + .get("description") + .and_then(|description| description.as_str()) + .unwrap() + .to_string(); + + (schema.into(), description) + }); + + let (tool_schema, tool_description) = TOOL_SCHEMA.clone(); + + let request = open_ai::Request { + model: CONTEXT_RETRIEVAL_MODEL_ID.clone(), + messages: vec![open_ai::RequestMessage::User { + content: open_ai::MessageContent::Plain(prompt), + }], + stream: false, + max_completion_tokens: None, + stop: Default::default(), + temperature: 0.7, + tool_choice: None, + parallel_tool_calls: None, + tools: vec![open_ai::ToolDefinition::Function { + function: FunctionDefinition { + name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME.to_string(), + description: Some(tool_description), + parameters: Some(tool_schema), + }, + }], + prompt_cache_key: None, + reasoning_effort: None, + }; + + #[cfg(feature = "eval-support")] + let eval_cache = self.eval_cache.clone(); + + cx.spawn(async move |this, cx| { + log::trace!("Sending search planning request"); + let response = Self::send_raw_llm_request( + request, + client, + llm_token, + app_version, + #[cfg(feature = "eval-support")] + eval_cache.clone(), + #[cfg(feature = "eval-support")] + EvalCacheEntryKind::Context, + ) + .await; + let mut response = Self::handle_api_response(&this, response, cx)?; + log::trace!("Got search planning response"); + + let choice = response + .choices + .pop() + .context("No choices in retrieval response")?; + let 
open_ai::RequestMessage::Assistant { + content: _, + tool_calls, + } = choice.message + else { + anyhow::bail!("Retrieval response didn't include an assistant message"); + }; + + let mut queries: Vec = Vec::new(); + for tool_call in tool_calls { + let open_ai::ToolCallContent::Function { function } = tool_call.content; + if function.name != cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME { + log::warn!( + "Context retrieval response tried to call an unknown tool: {}", + function.name + ); + + continue; + } + + let input: SearchToolInput = serde_json::from_str(&function.arguments) + .with_context(|| format!("invalid search json {}", &function.arguments))?; + queries.extend(input.queries); } - } - } -} -impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { - fn name() -> &'static str { - "zed-predict" - } + if let Some(debug_tx) = &debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::SearchQueriesGenerated( + ZetaSearchQueryDebugInfo { + project: project.clone(), + timestamp: Instant::now(), + search_queries: queries.clone(), + }, + )) + .ok(); + } - fn display_name() -> &'static str { - "Zed's Edit Predictions" - } + log::trace!("Running retrieval search: {queries:#?}"); - fn show_completions_in_menu() -> bool { - true - } + let related_excerpts_result = retrieval_search::run_retrieval_searches( + queries, + project.clone(), + #[cfg(feature = "eval-support")] + eval_cache, + cx, + ) + .await; - fn show_tab_accept_marker() -> bool { - true - } + log::trace!("Search queries executed"); + + if let Some(debug_tx) = &debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::SearchQueriesExecuted( + ZetaContextRetrievalDebugInfo { + project: project.clone(), + timestamp: Instant::now(), + }, + )) + .ok(); + } - fn data_collection_state(&self, cx: &App) -> DataCollectionState { - if let Some(buffer) = &self.singleton_buffer - && let Some(file) = buffer.read(cx).file() - { - let is_project_open_source = self.zeta.read(cx).is_file_open_source(file, 
cx); - if self.zeta.read(cx).data_collection_choice.is_enabled() { - DataCollectionState::Enabled { - is_project_open_source, + this.update(cx, |this, _cx| { + let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) else { + return Ok(()); + }; + zeta_project.refresh_context_task.take(); + if let Some(debug_tx) = &this.debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::ContextRetrievalFinished( + ZetaContextRetrievalDebugInfo { + project, + timestamp: Instant::now(), + }, + )) + .ok(); } - } else { - DataCollectionState::Disabled { - is_project_open_source, + match related_excerpts_result { + Ok(excerpts) => { + zeta_project.context = Some(excerpts); + Ok(()) + } + Err(error) => Err(error), } - } - } else { - return DataCollectionState::Disabled { - is_project_open_source: false, - }; - } - } - - fn toggle_data_collection(&mut self, cx: &mut App) { - self.zeta - .update(cx, |zeta, cx| zeta.toggle_data_collection_choice(cx)); - } - - fn usage(&self, cx: &App) -> Option { - self.zeta.read(cx).usage(cx) - } - - fn is_enabled( - &self, - _buffer: &Entity, - _cursor_position: language::Anchor, - _cx: &App, - ) -> bool { - true - } - fn is_refreshing(&self, _cx: &App) -> bool { - !self.pending_completions.is_empty() + })? 
+ }) } - fn refresh( + pub fn set_context( &mut self, - buffer: Entity, - position: language::Anchor, - _debounce: bool, - cx: &mut Context, + project: Entity, + context: HashMap, Vec>>, ) { - if self.zeta.read(cx).update_required { - return; + if let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) { + zeta_project.context = Some(context); } + } - if self - .zeta - .read(cx) - .user_store - .read_with(cx, |user_store, _cx| { - user_store.account_too_young() || user_store.has_overdue_invoices() - }) - { - return; + fn gather_nearby_diagnostics( + cursor_offset: usize, + diagnostic_sets: &[(LanguageServerId, DiagnosticSet)], + snapshot: &BufferSnapshot, + max_diagnostics_bytes: usize, + ) -> (Vec, bool) { + // TODO: Could make this more efficient + let mut diagnostic_groups = Vec::new(); + for (language_server_id, diagnostics) in diagnostic_sets { + let mut groups = Vec::new(); + diagnostics.groups(*language_server_id, &mut groups, &snapshot); + diagnostic_groups.extend( + groups + .into_iter() + .map(|(_, group)| group.resolve::(&snapshot)), + ); } - if let Some(current_completion) = self.current_completion.as_ref() { - let snapshot = buffer.read(cx).snapshot(); - if current_completion - .completion - .interpolate(&snapshot) - .is_some() - { - return; + // sort by proximity to cursor + diagnostic_groups.sort_by_key(|group| { + let range = &group.entries[group.primary_ix].range; + if range.start >= cursor_offset { + range.start - cursor_offset + } else if cursor_offset >= range.end { + cursor_offset - range.end + } else { + (cursor_offset - range.start).min(range.end - cursor_offset) + } + }); + + let mut results = Vec::new(); + let mut diagnostic_groups_truncated = false; + let mut diagnostics_byte_count = 0; + for group in diagnostic_groups { + let raw_value = serde_json::value::to_raw_value(&group).unwrap(); + diagnostics_byte_count += raw_value.get().len(); + if diagnostics_byte_count > max_diagnostics_bytes { + diagnostic_groups_truncated = 
true; + break; } + results.push(predict_edits_v3::DiagnosticGroup(raw_value)); } - let pending_completion_id = self.next_pending_completion_id; - self.next_pending_completion_id += 1; - let last_request_timestamp = self.last_request_timestamp; + (results, diagnostic_groups_truncated) + } - let project = self.project.clone(); - let task = cx.spawn(async move |this, cx| { - if let Some(timeout) = (last_request_timestamp + Self::THROTTLE_TIMEOUT) - .checked_duration_since(Instant::now()) - { - cx.background_executor().timer(timeout).await; - } + // TODO: Dedupe with similar code in request_prediction? + pub fn cloud_request_for_zeta_cli( + &mut self, + project: &Entity, + buffer: &Entity, + position: language::Anchor, + cx: &mut Context, + ) -> Task> { + let project_state = self.projects.get(&project.entity_id()); + + let index_state = project_state.and_then(|state| { + state + .syntax_index + .as_ref() + .map(|index| index.read_with(cx, |index, _cx| index.state().clone())) + }); + let options = self.options.clone(); + let snapshot = buffer.read(cx).snapshot(); + let Some(excerpt_path) = snapshot.file().map(|path| path.full_path(cx)) else { + return Task::ready(Err(anyhow!("No file path for excerpt"))); + }; + let worktree_snapshots = project + .read(cx) + .worktrees(cx) + .map(|worktree| worktree.read(cx).snapshot()) + .collect::>(); - let completion_request = this.update(cx, |this, cx| { - this.last_request_timestamp = Instant::now(); - this.zeta.update(cx, |zeta, cx| { - zeta.request_completion(&project, &buffer, position, cx) - }) - }); + let parent_abs_path = project::File::from_dyn(buffer.read(cx).file()).and_then(|f| { + let mut path = f.worktree.read(cx).absolutize(&f.path); + if path.pop() { Some(path) } else { None } + }); - let completion = match completion_request { - Ok(completion_request) => { - let completion_request = completion_request.await; - completion_request.map(|c| { - c.map(|completion| CurrentEditPrediction { - buffer_id: buffer.entity_id(), - 
completion, - was_shown: false, - was_accepted: false, - }) - }) - } - Err(error) => Err(error), + cx.background_spawn(async move { + let index_state = if let Some(index_state) = index_state { + Some(index_state.lock_owned().await) + } else { + None }; - let discarded = this - .update(cx, |this, cx| { - if this - .pending_completions - .first() - .is_some_and(|completion| completion.id == pending_completion_id) - { - this.pending_completions.remove(0); - } else { - if let Some(discarded) = this.pending_completions.drain(..).next() { - this.canceled_completions - .insert(discarded.id, discarded.task); - } - } - - let canceled = this.canceled_completions.remove(&pending_completion_id); + let cursor_point = position.to_point(&snapshot); - if canceled.is_some() - && let Ok(Some(new_completion)) = &completion - { - this.zeta.update(cx, |zeta, cx| { - zeta.discard_completion(new_completion.completion.id, false, cx); - }); - return true; + let debug_info = true; + EditPredictionContext::gather_context( + cursor_point, + &snapshot, + parent_abs_path.as_deref(), + match &options.context { + ContextMode::Agentic(_) => { + // TODO + panic!("Llm mode not supported in zeta cli yet"); } - - cx.notify(); - false - }) - .ok() - .unwrap_or(true); - - if discarded { - return; - } - - let Some(new_completion) = completion - .context("edit prediction failed") - .log_err() - .flatten() - else { - return; - }; - - this.update(cx, |this, cx| { - if let Some(old_completion) = this.current_completion.as_ref() { - let snapshot = buffer.read(cx).snapshot(); - if new_completion.should_replace_completion(old_completion, &snapshot) { - this.zeta.update(cx, |zeta, cx| { - zeta.completion_shown(&new_completion.completion, cx); - }); - this.take_current_edit_prediction(cx); - this.current_completion = Some(new_completion); + ContextMode::Syntax(edit_prediction_context_options) => { + edit_prediction_context_options } - } else { - this.zeta.update(cx, |zeta, cx| { - 
zeta.completion_shown(&new_completion.completion, cx); - }); - this.current_completion = Some(new_completion); - } - - cx.notify(); + }, + index_state.as_deref(), + ) + .context("Failed to select excerpt") + .map(|context| { + make_syntax_context_cloud_request( + excerpt_path.into(), + context, + // TODO pass everything + Vec::new(), + false, + Vec::new(), + false, + None, + debug_info, + &worktree_snapshots, + index_state.as_deref(), + Some(options.max_prompt_bytes), + options.prompt_format, + ) }) - .ok(); - }); - - // We always maintain at most two pending completions. When we already - // have two, we replace the newest one. - if self.pending_completions.len() <= 1 { - self.pending_completions.push(PendingCompletion { - id: pending_completion_id, - task, - }); - } else if self.pending_completions.len() == 2 { - if let Some(discarded) = self.pending_completions.pop() { - self.canceled_completions - .insert(discarded.id, discarded.task); - } - self.pending_completions.push(PendingCompletion { - id: pending_completion_id, - task, - }); - } + }) } - fn cycle( + pub fn wait_for_initial_indexing( &mut self, - _buffer: Entity, - _cursor_position: language::Anchor, - _direction: edit_prediction::Direction, - _cx: &mut Context, - ) { - // Right now we don't support cycling. 
+ project: &Entity, + cx: &mut Context, + ) -> Task> { + let zeta_project = self.get_or_init_zeta_project(project, cx); + if let Some(syntax_index) = &zeta_project.syntax_index { + syntax_index.read(cx).wait_for_initial_file_indexing(cx) + } else { + Task::ready(Ok(())) + } } - fn accept(&mut self, cx: &mut Context) { - let completion = self.current_completion.as_mut(); - if let Some(completion) = completion { - completion.was_accepted = true; - self.zeta - .update(cx, |zeta, cx| { - zeta.accept_edit_prediction(completion.completion.id, cx) - }) - .detach(); + fn is_file_open_source( + &self, + project: &Entity, + file: &Arc, + cx: &App, + ) -> bool { + if !file.is_local() || file.is_private() { + return false; } - self.pending_completions.clear(); + let Some(zeta_project) = self.projects.get(&project.entity_id()) else { + return false; + }; + zeta_project + .license_detection_watchers + .get(&file.worktree_id(cx)) + .as_ref() + .is_some_and(|watcher| watcher.is_project_open_source()) } - fn discard(&mut self, cx: &mut Context) { - self.pending_completions.clear(); - self.take_current_edit_prediction(cx); + fn can_collect_file(&self, project: &Entity, file: &Arc, cx: &App) -> bool { + self.data_collection_choice.is_enabled() && self.is_file_open_source(project, file, cx) } - fn did_show(&mut self, _cx: &mut Context) { - if let Some(current_completion) = self.current_completion.as_mut() { - current_completion.was_shown = true; + fn can_collect_events(&self, events: &[Arc]) -> bool { + if !self.data_collection_choice.is_enabled() { + return false; } + events.iter().all(|event| { + matches!( + event.as_ref(), + Event::BufferChange { + in_open_source_repo: true, + .. + } + ) + }) } - fn suggest( - &mut self, - buffer: &Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) -> Option { - let CurrentEditPrediction { - buffer_id, - completion, - .. 
- } = self.current_completion.as_mut()?; - - // Invalidate previous completion if it was generated for a different buffer. - if *buffer_id != buffer.entity_id() { - self.take_current_edit_prediction(cx); - return None; - } - - let buffer = buffer.read(cx); - let Some(edits) = completion.interpolate(&buffer.snapshot()) else { - self.take_current_edit_prediction(cx); - return None; - }; - - let cursor_row = cursor_position.to_point(buffer).row; - let (closest_edit_ix, (closest_edit_range, _)) = - edits.iter().enumerate().min_by_key(|(_, (range, _))| { - let distance_from_start = cursor_row.abs_diff(range.start.to_point(buffer).row); - let distance_from_end = cursor_row.abs_diff(range.end.to_point(buffer).row); - cmp::min(distance_from_start, distance_from_end) - })?; + fn load_data_collection_choice() -> DataCollectionChoice { + let choice = KEY_VALUE_STORE + .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) + .log_err() + .flatten(); - let mut edit_start_ix = closest_edit_ix; - for (range, _) in edits[..edit_start_ix].iter().rev() { - let distance_from_closest_edit = - closest_edit_range.start.to_point(buffer).row - range.end.to_point(buffer).row; - if distance_from_closest_edit <= 1 { - edit_start_ix -= 1; - } else { - break; + match choice.as_deref() { + Some("true") => DataCollectionChoice::Enabled, + Some("false") => DataCollectionChoice::Disabled, + Some(_) => { + log::error!("unknown value in '{ZED_PREDICT_DATA_COLLECTION_CHOICE}'"); + DataCollectionChoice::NotAnswered } + None => DataCollectionChoice::NotAnswered, } + } - let mut edit_end_ix = closest_edit_ix + 1; - for (range, _) in &edits[edit_end_ix..] 
{ - let distance_from_closest_edit = - range.start.to_point(buffer).row - closest_edit_range.end.to_point(buffer).row; - if distance_from_closest_edit <= 1 { - edit_end_ix += 1; - } else { - break; - } - } + pub fn shown_predictions(&self) -> impl DoubleEndedIterator { + self.shown_predictions.iter() + } - Some(edit_prediction::EditPrediction::Local { - id: Some(completion.id.to_string().into()), - edits: edits[edit_start_ix..edit_end_ix].to_vec(), - edit_preview: Some(completion.edit_preview.clone()), - }) + pub fn shown_completions_len(&self) -> usize { + self.shown_predictions.len() } -} -/// Typical number of string bytes per token for the purposes of limiting model input. This is -/// intentionally low to err on the side of underestimating limits. -const BYTES_PER_TOKEN_GUESS: usize = 3; + pub fn is_prediction_rated(&self, id: &EditPredictionId) -> bool { + self.rated_predictions.contains(id) + } -fn guess_token_count(bytes: usize) -> usize { - bytes / BYTES_PER_TOKEN_GUESS + pub fn rate_prediction( + &mut self, + prediction: &EditPrediction, + rating: EditPredictionRating, + feedback: String, + cx: &mut Context, + ) { + self.rated_predictions.insert(prediction.id.clone()); + telemetry::event!( + "Edit Prediction Rated", + rating, + inputs = prediction.inputs, + output = prediction.edit_preview.as_unified_diff(&prediction.edits), + feedback + ); + self.client.telemetry().flush_events().detach(); + cx.notify(); + } } -#[cfg(test)] -mod tests { - use client::test::FakeServer; - use clock::{FakeSystemClock, ReplicaId}; - use cloud_api_types::{CreateLlmTokenResponse, LlmToken}; - use gpui::TestAppContext; - use http_client::FakeHttpClient; - use indoc::indoc; - use language::Point; - use parking_lot::Mutex; - use serde_json::json; - use settings::SettingsStore; - use util::{path, rel_path::rel_path}; - - use super::*; - - const BSD_0_TXT: &str = include_str!("../license_examples/0bsd.txt"); +pub fn text_from_response(mut res: open_ai::Response) -> Option { + let 
choice = res.choices.pop()?; + let output_text = match choice.message { + open_ai::RequestMessage::Assistant { + content: Some(open_ai::MessageContent::Plain(content)), + .. + } => content, + open_ai::RequestMessage::Assistant { + content: Some(open_ai::MessageContent::Multipart(mut content)), + .. + } => { + if content.is_empty() { + log::error!("No output from Baseten completion response"); + return None; + } - #[gpui::test] - async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) { - let buffer = cx.new(|cx| Buffer::local("Lorem ipsum dolor", cx)); - let edits: Arc<[(Range, Arc)]> = cx.update(|cx| { - to_completion_edits([(2..5, "REM".into()), (9..11, "".into())], &buffer, cx).into() - }); + match content.remove(0) { + open_ai::MessagePart::Text { text } => text, + open_ai::MessagePart::Image { .. } => { + log::error!("Expected text, got an image"); + return None; + } + } + } + _ => { + log::error!("Invalid response message: {:?}", choice.message); + return None; + } + }; + Some(output_text) +} - let edit_preview = cx - .read(|cx| buffer.read(cx).preview_edits(edits.clone(), cx)) - .await; +#[derive(Error, Debug)] +#[error( + "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
+)] +pub struct ZedUpdateRequiredError { + minimum_version: Version, +} - let completion = EditPrediction { - edits, - edit_preview, - path: Path::new("").into(), - snapshot: cx.read(|cx| buffer.read(cx).snapshot()), - id: EditPredictionId(Uuid::new_v4()), - excerpt_range: 0..0, - cursor_offset: 0, - input_outline: "".into(), - input_events: "".into(), - input_excerpt: "".into(), - output_excerpt: "".into(), - buffer_snapshotted_at: Instant::now(), - response_received_at: Instant::now(), +fn make_syntax_context_cloud_request( + excerpt_path: Arc, + context: EditPredictionContext, + events: Vec>, + can_collect_data: bool, + diagnostic_groups: Vec, + diagnostic_groups_truncated: bool, + git_info: Option, + debug_info: bool, + worktrees: &Vec, + index_state: Option<&SyntaxIndexState>, + prompt_max_bytes: Option, + prompt_format: PromptFormat, +) -> predict_edits_v3::PredictEditsRequest { + let mut signatures = Vec::new(); + let mut declaration_to_signature_index = HashMap::default(); + let mut referenced_declarations = Vec::new(); + + for snippet in context.declarations { + let project_entry_id = snippet.declaration.project_entry_id(); + let Some(path) = worktrees.iter().find_map(|worktree| { + worktree.entry_for_id(project_entry_id).map(|entry| { + let mut full_path = RelPathBuf::new(); + full_path.push(worktree.root_name()); + full_path.push(&entry.path); + full_path + }) + }) else { + continue; }; - cx.update(|cx| { - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(2..5, "REM".into()), (9..11, "".into())] - ); - - buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(2..2, "REM".into()), (6..8, "".into())] - ); - - buffer.update(cx, |buffer, cx| buffer.undo(cx)); - assert_eq!( - from_completion_edits( - 
&completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(2..5, "REM".into()), (9..11, "".into())] - ); + let parent_index = index_state.and_then(|index_state| { + snippet.declaration.parent().and_then(|parent| { + add_signature( + parent, + &mut declaration_to_signature_index, + &mut signatures, + index_state, + ) + }) + }); - buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "R")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(3..3, "EM".into()), (7..9, "".into())] - ); + let (text, text_is_truncated) = snippet.declaration.item_text(); + referenced_declarations.push(predict_edits_v3::ReferencedDeclaration { + path: path.as_std_path().into(), + text: text.into(), + range: snippet.declaration.item_line_range(), + text_is_truncated, + signature_range: snippet.declaration.signature_range_in_item_text(), + parent_index, + signature_score: snippet.score(DeclarationStyle::Signature), + declaration_score: snippet.score(DeclarationStyle::Declaration), + score_components: snippet.components, + }); + } - buffer.update(cx, |buffer, cx| buffer.edit([(3..3, "E")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(4..4, "M".into()), (8..10, "".into())] - ); + let excerpt_parent = index_state.and_then(|index_state| { + context + .excerpt + .parent_declarations + .last() + .and_then(|(parent, _)| { + add_signature( + *parent, + &mut declaration_to_signature_index, + &mut signatures, + index_state, + ) + }) + }); + + predict_edits_v3::PredictEditsRequest { + excerpt_path, + excerpt: context.excerpt_text.body, + excerpt_line_range: context.excerpt.line_range, + excerpt_range: context.excerpt.range, + cursor_point: predict_edits_v3::Point { + line: predict_edits_v3::Line(context.cursor_point.row), + column: context.cursor_point.column, + }, + 
referenced_declarations, + included_files: vec![], + signatures, + excerpt_parent, + events, + can_collect_data, + diagnostic_groups, + diagnostic_groups_truncated, + git_info, + debug_info, + prompt_max_bytes, + prompt_format, + } +} - buffer.update(cx, |buffer, cx| buffer.edit([(4..4, "M")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(9..11, "".into())] - ); +fn add_signature( + declaration_id: DeclarationId, + declaration_to_signature_index: &mut HashMap, + signatures: &mut Vec, + index: &SyntaxIndexState, +) -> Option { + if let Some(signature_index) = declaration_to_signature_index.get(&declaration_id) { + return Some(*signature_index); + } + let Some(parent_declaration) = index.declaration(declaration_id) else { + log::error!("bug: missing parent declaration"); + return None; + }; + let parent_index = parent_declaration.parent().and_then(|parent| { + add_signature(parent, declaration_to_signature_index, signatures, index) + }); + let (text, text_is_truncated) = parent_declaration.signature_text(); + let signature_index = signatures.len(); + signatures.push(Signature { + text: text.into(), + text_is_truncated, + parent_index, + range: parent_declaration.signature_line_range(), + }); + declaration_to_signature_index.insert(declaration_id, signature_index); + Some(signature_index) +} - buffer.update(cx, |buffer, cx| buffer.edit([(4..5, "")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(4..4, "M".into()), (8..10, "".into())] - ); +#[cfg(feature = "eval-support")] +pub type EvalCacheKey = (EvalCacheEntryKind, u64); - buffer.update(cx, |buffer, cx| buffer.edit([(8..10, "")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(4..4, "M".into())] - ); +#[cfg(feature = 
"eval-support")] +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum EvalCacheEntryKind { + Context, + Search, + Prediction, +} - buffer.update(cx, |buffer, cx| buffer.edit([(4..6, "")], None, cx)); - assert_eq!(completion.interpolate(&buffer.read(cx).snapshot()), None); - }) +#[cfg(feature = "eval-support")] +impl std::fmt::Display for EvalCacheEntryKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + EvalCacheEntryKind::Search => write!(f, "search"), + EvalCacheEntryKind::Context => write!(f, "context"), + EvalCacheEntryKind::Prediction => write!(f, "prediction"), + } } +} - #[gpui::test] - async fn test_clean_up_diff(cx: &mut TestAppContext) { - init_test(cx); - - assert_eq!( - apply_edit_prediction( - indoc! {" - fn main() { - let word_1 = \"lorem\"; - let range = word.len()..word.len(); - } - "}, - indoc! {" - <|editable_region_start|> - fn main() { - let word_1 = \"lorem\"; - let range = word_1.len()..word_1.len(); - } +#[cfg(feature = "eval-support")] +pub trait EvalCache: Send + Sync { + fn read(&self, key: EvalCacheKey) -> Option; + fn write(&self, key: EvalCacheKey, input: &str, value: &str); +} - <|editable_region_end|> - "}, - cx, - ) - .await, - indoc! {" - fn main() { - let word_1 = \"lorem\"; - let range = word_1.len()..word_1.len(); - } - "}, - ); +#[derive(Debug, Clone, Copy)] +pub enum DataCollectionChoice { + NotAnswered, + Enabled, + Disabled, +} - assert_eq!( - apply_edit_prediction( - indoc! {" - fn main() { - let story = \"the quick\" - } - "}, - indoc! {" - <|editable_region_start|> - fn main() { - let story = \"the quick brown fox jumps over the lazy dog\"; - } +impl DataCollectionChoice { + pub fn is_enabled(self) -> bool { + match self { + Self::Enabled => true, + Self::NotAnswered | Self::Disabled => false, + } + } - <|editable_region_end|> - "}, - cx, - ) - .await, - indoc! 
{" - fn main() { - let story = \"the quick brown fox jumps over the lazy dog\"; - } - "}, - ); + pub fn is_answered(self) -> bool { + match self { + Self::Enabled | Self::Disabled => true, + Self::NotAnswered => false, + } } - #[gpui::test] - async fn test_edit_prediction_end_of_buffer(cx: &mut TestAppContext) { - init_test(cx); - - let buffer_content = "lorem\n"; - let completion_response = indoc! {" - ```animals.js - <|start_of_file|> - <|editable_region_start|> - lorem - ipsum - <|editable_region_end|> - ```"}; + #[must_use] + pub fn toggle(&self) -> DataCollectionChoice { + match self { + Self::Enabled => Self::Disabled, + Self::Disabled => Self::Enabled, + Self::NotAnswered => Self::Enabled, + } + } +} - assert_eq!( - apply_edit_prediction(buffer_content, completion_response, cx).await, - "lorem\nipsum" - ); +impl From for DataCollectionChoice { + fn from(value: bool) -> Self { + match value { + true => DataCollectionChoice::Enabled, + false => DataCollectionChoice::Disabled, + } } +} - #[gpui::test] - async fn test_can_collect_data(cx: &mut TestAppContext) { - init_test(cx); +struct ZedPredictUpsell; - let fs = project::FakeFs::new(cx.executor()); - fs.insert_tree(path!("/project"), json!({ "LICENSE": BSD_0_TXT })) - .await; +impl Dismissable for ZedPredictUpsell { + const KEY: &'static str = "dismissed-edit-predict-upsell"; - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = project - .update(cx, |project, cx| { - project.open_local_buffer(path!("/project/src/main.rs"), cx) - }) - .await - .unwrap(); + fn dismissed() -> bool { + // To make this backwards compatible with older versions of Zed, we + // check if the user has seen the previous Edit Prediction Onboarding + // before, by checking the data collection choice which was written to + // the database once the user clicked on "Accept and Enable" + if KEY_VALUE_STORE + .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) + .log_err() + .is_some_and(|s| s.is_some()) + 
{ + return true; + } - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled - }); + KEY_VALUE_STORE + .read_kvp(Self::KEY) + .log_err() + .is_some_and(|s| s.is_some()) + } +} - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true - ); +pub fn should_show_upsell_modal() -> bool { + !ZedPredictUpsell::dismissed() +} + +pub fn init(cx: &mut App) { + feature_gate_predict_edits_actions(cx); - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Disabled + cx.observe_new(move |workspace: &mut Workspace, _, _cx| { + workspace.register_action(|workspace, _: &RateCompletions, window, cx| { + if cx.has_flag::() { + RatePredictionsModal::toggle(workspace, window, cx); + } }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false + workspace.register_action( + move |workspace, _: &zed_actions::OpenZedPredictOnboarding, window, cx| { + ZedPredictModal::toggle( + workspace, + workspace.user_store().clone(), + workspace.client().clone(), + window, + cx, + ) + }, ); - } - #[gpui::test] - async fn test_no_data_collection_for_remote_file(cx: &mut TestAppContext) { - init_test(cx); - - let fs = project::FakeFs::new(cx.executor()); - let project = Project::test(fs.clone(), [], cx).await; - - let buffer = cx.new(|_cx| { - Buffer::remote( - language::BufferId::new(1).unwrap(), - ReplicaId::new(1), - language::Capability::ReadWrite, - "fn main() {\n println!(\"Hello\");\n}", - ) + workspace.register_action(|workspace, _: &ResetOnboarding, _window, cx| { + update_settings_file(workspace.app_state().fs.clone(), cx, move |settings, _| { + settings + .project + .all_languages + .features + .get_or_insert_default() + .edit_prediction_provider = Some(EditPredictionProvider::None) + 
}); }); + }) + .detach(); +} - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled +fn feature_gate_predict_edits_actions(cx: &mut App) { + let rate_completion_action_types = [TypeId::of::()]; + let reset_onboarding_action_types = [TypeId::of::()]; + let zeta_all_action_types = [ + TypeId::of::(), + TypeId::of::(), + zed_actions::OpenZedPredictOnboarding.type_id(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ]; + + CommandPaletteFilter::update_global(cx, |filter, _cx| { + filter.hide_action_types(&rate_completion_action_types); + filter.hide_action_types(&reset_onboarding_action_types); + filter.hide_action_types(&[zed_actions::OpenZedPredictOnboarding.type_id()]); + }); + + cx.observe_global::(move |cx| { + let is_ai_disabled = DisableAiSettings::get_global(cx).disable_ai; + let has_feature_flag = cx.has_flag::(); + + CommandPaletteFilter::update_global(cx, |filter, _cx| { + if is_ai_disabled { + filter.hide_action_types(&zeta_all_action_types); + } else if has_feature_flag { + filter.show_action_types(&rate_completion_action_types); + } else { + filter.hide_action_types(&rate_completion_action_types); + } }); + }) + .detach(); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); - } + cx.observe_flag::(move |is_enabled, cx| { + if !DisableAiSettings::get_global(cx).disable_ai { + if is_enabled { + CommandPaletteFilter::update_global(cx, |filter, _cx| { + filter.show_action_types(&rate_completion_action_types); + }); + } else { + CommandPaletteFilter::update_global(cx, |filter, _cx| { + filter.hide_action_types(&rate_completion_action_types); + }); + } + } + }) + .detach(); +} - #[gpui::test] - async fn test_no_data_collection_for_private_file(cx: &mut TestAppContext) { - init_test(cx); +#[cfg(test)] +mod tests { + 
use std::{path::Path, sync::Arc}; + + use client::UserStore; + use clock::FakeSystemClock; + use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; + use futures::{ + AsyncReadExt, StreamExt, + channel::{mpsc, oneshot}, + }; + use gpui::{ + Entity, TestAppContext, + http_client::{FakeHttpClient, Response}, + prelude::*, + }; + use indoc::indoc; + use language::OffsetRangeExt as _; + use open_ai::Usage; + use pretty_assertions::{assert_eq, assert_matches}; + use project::{FakeFs, Project}; + use serde_json::json; + use settings::SettingsStore; + use util::path; + use uuid::Uuid; - let fs = project::FakeFs::new(cx.executor()); + use crate::{BufferEditPrediction, Zeta}; + + #[gpui::test] + async fn test_current_state(cx: &mut TestAppContext) { + let (zeta, mut req_rx) = init_test(cx); + let fs = FakeFs::new(cx.executor()); fs.insert_tree( - path!("/project"), + "/root", json!({ - "LICENSE": BSD_0_TXT, - ".env": "SECRET_KEY=secret" + "1.txt": "Hello!\nHow\nBye\n", + "2.txt": "Hola!\nComo\nAdios\n" }), ) .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = project + zeta.update(cx, |zeta, cx| { + zeta.register_project(&project, cx); + }); + + let buffer1 = project .update(cx, |project, cx| { - project.open_local_buffer("/project/.env", cx) + let path = project.find_project_path(path!("root/1.txt"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); + let snapshot1 = buffer1.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot1.anchor_before(language::Point::new(1, 3)); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled - }); - - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); 
- } + // Prediction for current file - #[gpui::test] - async fn test_no_data_collection_for_untitled_buffer(cx: &mut TestAppContext) { - init_test(cx); + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) + }); + let (_request, respond_tx) = req_rx.next().await.unwrap(); + + respond_tx + .send(model_response(indoc! {r" + --- a/root/1.txt + +++ b/root/1.txt + @@ ... @@ + Hello! + -How + +How are you? + Bye + "})) + .unwrap(); - let fs = project::FakeFs::new(cx.executor()); - let project = Project::test(fs.clone(), [], cx).await; - let buffer = cx.new(|cx| Buffer::local("", cx)); + cx.run_until_parked(); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + zeta.read_with(cx, |zeta, cx| { + let prediction = zeta + .current_prediction_for_buffer(&buffer1, &project, cx) + .unwrap(); + assert_matches!(prediction, BufferEditPrediction::Local { .. 
}); }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); - } + // Context refresh + let refresh_task = zeta.update(cx, |zeta, cx| { + zeta.refresh_context(project.clone(), buffer1.clone(), position, cx) + }); + let (_request, respond_tx) = req_rx.next().await.unwrap(); + respond_tx + .send(open_ai::Response { + id: Uuid::new_v4().to_string(), + object: "response".into(), + created: 0, + model: "model".into(), + choices: vec![open_ai::Choice { + index: 0, + message: open_ai::RequestMessage::Assistant { + content: None, + tool_calls: vec![open_ai::ToolCall { + id: "search".into(), + content: open_ai::ToolCallContent::Function { + function: open_ai::FunctionContent { + name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME + .to_string(), + arguments: serde_json::to_string(&SearchToolInput { + queries: Box::new([SearchToolQuery { + glob: "root/2.txt".to_string(), + syntax_node: vec![], + content: Some(".".into()), + }]), + }) + .unwrap(), + }, + }, + }], + }, + finish_reason: None, + }], + usage: Usage { + prompt_tokens: 0, + completion_tokens: 0, + total_tokens: 0, + }, + }) + .unwrap(); + refresh_task.await.unwrap(); - #[gpui::test] - async fn test_no_data_collection_when_closed_source(cx: &mut TestAppContext) { - init_test(cx); + zeta.update(cx, |zeta, cx| { + zeta.discard_current_prediction(&project, cx); + }); - let fs = project::FakeFs::new(cx.executor()); - fs.insert_tree(path!("/project"), json!({ "main.rs": "fn main() {}" })) - .await; + // Prediction for another file + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) + }); + let (_request, respond_tx) = req_rx.next().await.unwrap(); + respond_tx + .send(model_response(indoc! {r#" + --- a/root/2.txt + +++ b/root/2.txt + Hola! + -Como + +Como estas? 
+ Adios + "#})) + .unwrap(); + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + let prediction = zeta + .current_prediction_for_buffer(&buffer1, &project, cx) + .unwrap(); + assert_matches!( + prediction, + BufferEditPrediction::Jump { prediction } if prediction.snapshot.file().unwrap().full_path(cx) == Path::new(path!("root/2.txt")) + ); + }); - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = project + let buffer2 = project .update(cx, |project, cx| { - project.open_local_buffer("/project/main.rs", cx) + let path = project.find_project_path(path!("root/2.txt"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + zeta.read_with(cx, |zeta, cx| { + let prediction = zeta + .current_prediction_for_buffer(&buffer2, &project, cx) + .unwrap(); + assert_matches!(prediction, BufferEditPrediction::Local { .. 
}); }); - - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); } #[gpui::test] - async fn test_data_collection_status_changes_on_move(cx: &mut TestAppContext) { - init_test(cx); - - let fs = project::FakeFs::new(cx.executor()); + async fn test_simple_request(cx: &mut TestAppContext) { + let (zeta, mut req_rx) = init_test(cx); + let fs = FakeFs::new(cx.executor()); fs.insert_tree( - path!("/open_source_worktree"), - json!({ "LICENSE": BSD_0_TXT, "main.rs": "" }), + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), ) .await; - fs.insert_tree(path!("/closed_source_worktree"), json!({ "main.rs": "" })) - .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - let project = Project::test( - fs.clone(), - [ - path!("/open_source_worktree").as_ref(), - path!("/closed_source_worktree").as_ref(), - ], - cx, - ) - .await; let buffer = project .update(cx, |project, cx| { - project.open_local_buffer(path!("/open_source_worktree/main.rs"), cx) + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + let prediction_task = zeta.update(cx, |zeta, cx| { + zeta.request_prediction(&project, &buffer, position, cx) }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true - ); - - let closed_source_file = project - .update(cx, |project, cx| { - let worktree2 = project - .worktree_for_root_name("closed_source_worktree", cx) - .unwrap(); - worktree2.update(cx, |worktree2, cx| { - 
worktree2.load_file(rel_path("main.rs"), cx) - }) - }) - .await - .unwrap() - .file; + let (_, respond_tx) = req_rx.next().await.unwrap(); + + // TODO Put back when we have a structured request again + // assert_eq!( + // request.excerpt_path.as_ref(), + // Path::new(path!("root/foo.md")) + // ); + // assert_eq!( + // request.cursor_point, + // Point { + // line: Line(1), + // column: 3 + // } + // ); + + respond_tx + .send(model_response(indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are you? + Bye + "})) + .unwrap(); - buffer.update(cx, |buffer, cx| { - buffer.file_updated(closed_source_file, cx); - }); + let prediction = prediction_task.await.unwrap().unwrap(); - run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!(prediction.edits.len(), 1); assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false + prediction.edits[0].0.to_point(&snapshot).start, + language::Point::new(1, 3) ); + assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); } #[gpui::test] - async fn test_no_data_collection_for_events_in_uncollectable_buffers(cx: &mut TestAppContext) { - init_test(cx); - - let fs = project::FakeFs::new(cx.executor()); + async fn test_request_events(cx: &mut TestAppContext) { + let (zeta, mut req_rx) = init_test(cx); + let fs = FakeFs::new(cx.executor()); fs.insert_tree( - path!("/worktree1"), - json!({ "LICENSE": BSD_0_TXT, "main.rs": "", "other.rs": "" }), + "/root", + json!({ + "foo.md": "Hello!\n\nBye\n" + }), ) .await; - fs.insert_tree(path!("/worktree2"), json!({ "private.rs": "" })) - .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - let project = Project::test( - fs.clone(), - [path!("/worktree1").as_ref(), path!("/worktree2").as_ref()], - cx, - ) - .await; let buffer = project .update(cx, |project, cx| { - project.open_local_buffer(path!("/worktree1/main.rs"), cx) - }) - .await - .unwrap(); - let private_buffer = project - .update(cx, 
|project, cx| { - project.open_local_buffer(path!("/worktree2/file.rs"), cx) + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + zeta.update(cx, |zeta, cx| { + zeta.register_buffer(&buffer, &project, cx); }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true - ); + buffer.update(cx, |buffer, cx| { + buffer.edit(vec![(7..7, "How")], None, cx); + }); - // this has a side effect of registering the buffer to watch for edits - run_edit_prediction(&private_buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); - private_buffer.update(cx, |private_buffer, cx| { - private_buffer.edit([(0..0, "An edit for the history!")], None, cx); + let prediction_task = zeta.update(cx, |zeta, cx| { + zeta.request_prediction(&project, &buffer, position, cx) }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false + let (request, respond_tx) = req_rx.next().await.unwrap(); + + let prompt = prompt_from_request(&request); + assert!( + prompt.contains(indoc! {" + --- a/root/foo.md + +++ b/root/foo.md + @@ -1,3 +1,3 @@ + Hello! + - + +How + Bye + "}), + "{prompt}" ); - // make an edit that uses too many bytes, causing private_buffer edit to not be able to be - // included - buffer.update(cx, |buffer, cx| { - buffer.edit( - [(0..0, " ".repeat(MAX_EVENT_TOKENS * BYTES_PER_TOKEN_GUESS))], - None, - cx, - ); - }); + respond_tx + .send(model_response(indoc! 
{r#" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are you? + Bye + "#})) + .unwrap(); + + let prediction = prediction_task.await.unwrap().unwrap(); - run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!(prediction.edits.len(), 1); assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true + prediction.edits[0].0.to_point(&snapshot).start, + language::Point::new(1, 3) ); + assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); + } + + // Skipped until we start including diagnostics in prompt + // #[gpui::test] + // async fn test_request_diagnostics(cx: &mut TestAppContext) { + // let (zeta, mut req_rx) = init_test(cx); + // let fs = FakeFs::new(cx.executor()); + // fs.insert_tree( + // "/root", + // json!({ + // "foo.md": "Hello!\nBye" + // }), + // ) + // .await; + // let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + // let path_to_buffer_uri = lsp::Uri::from_file_path(path!("/root/foo.md")).unwrap(); + // let diagnostic = lsp::Diagnostic { + // range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)), + // severity: Some(lsp::DiagnosticSeverity::ERROR), + // message: "\"Hello\" deprecated. 
Use \"Hi\" instead".to_string(), + // ..Default::default() + // }; + + // project.update(cx, |project, cx| { + // project.lsp_store().update(cx, |lsp_store, cx| { + // // Create some diagnostics + // lsp_store + // .update_diagnostics( + // LanguageServerId(0), + // lsp::PublishDiagnosticsParams { + // uri: path_to_buffer_uri.clone(), + // diagnostics: vec![diagnostic], + // version: None, + // }, + // None, + // language::DiagnosticSourceKind::Pushed, + // &[], + // cx, + // ) + // .unwrap(); + // }); + // }); + + // let buffer = project + // .update(cx, |project, cx| { + // let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + // project.open_buffer(path, cx) + // }) + // .await + // .unwrap(); + + // let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + // let position = snapshot.anchor_before(language::Point::new(0, 0)); + + // let _prediction_task = zeta.update(cx, |zeta, cx| { + // zeta.request_prediction(&project, &buffer, position, cx) + // }); + + // let (request, _respond_tx) = req_rx.next().await.unwrap(); + + // assert_eq!(request.diagnostic_groups.len(), 1); + // let value = serde_json::from_str::(request.diagnostic_groups[0].0.get()) + // .unwrap(); + // // We probably don't need all of this. TODO define a specific diagnostic type in predict_edits_v3 + // assert_eq!( + // value, + // json!({ + // "entries": [{ + // "range": { + // "start": 8, + // "end": 10 + // }, + // "diagnostic": { + // "source": null, + // "code": null, + // "code_description": null, + // "severity": 1, + // "message": "\"Hello\" deprecated. 
Use \"Hi\" instead", + // "markdown": null, + // "group_id": 0, + // "is_primary": true, + // "is_disk_based": false, + // "is_unnecessary": false, + // "source_kind": "Pushed", + // "data": null, + // "underline": true + // } + // }], + // "primary_ix": 0 + // }) + // ); + // } + + fn model_response(text: &str) -> open_ai::Response { + open_ai::Response { + id: Uuid::new_v4().to_string(), + object: "response".into(), + created: 0, + model: "model".into(), + choices: vec![open_ai::Choice { + index: 0, + message: open_ai::RequestMessage::Assistant { + content: Some(open_ai::MessageContent::Plain(text.to_string())), + tool_calls: vec![], + }, + finish_reason: None, + }], + usage: Usage { + prompt_tokens: 0, + completion_tokens: 0, + total_tokens: 0, + }, + } } - fn init_test(cx: &mut TestAppContext) { - cx.update(|cx| { - let settings_store = SettingsStore::test(cx); - cx.set_global(settings_store); - }); - } - - async fn apply_edit_prediction( - buffer_content: &str, - completion_response: &str, - cx: &mut TestAppContext, - ) -> String { - let fs = project::FakeFs::new(cx.executor()); - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = cx.new(|cx| Buffer::local(buffer_content, cx)); - let (zeta, _, response) = make_test_zeta(&project, cx).await; - *response.lock() = completion_response.to_string(); - let edit_prediction = run_edit_prediction(&buffer, &project, &zeta, cx).await; - buffer.update(cx, |buffer, cx| { - buffer.edit(edit_prediction.edits.iter().cloned(), None, cx) - }); - buffer.read_with(cx, |buffer, _| buffer.text()) - } - - async fn run_edit_prediction( - buffer: &Entity, - project: &Entity, - zeta: &Entity, - cx: &mut TestAppContext, - ) -> EditPrediction { - let cursor = buffer.read_with(cx, |buffer, _| buffer.anchor_before(Point::new(1, 0))); - zeta.update(cx, |zeta, cx| zeta.register_buffer(buffer, &project, cx)); - cx.background_executor.run_until_parked(); - let completion_task = zeta.update(cx, |zeta, 
cx| { - zeta.request_completion(&project, buffer, cursor, cx) - }); - completion_task.await.unwrap().unwrap() + fn prompt_from_request(request: &open_ai::Request) -> &str { + assert_eq!(request.messages.len(), 1); + let open_ai::RequestMessage::User { + content: open_ai::MessageContent::Plain(content), + .. + } = &request.messages[0] + else { + panic!( + "Request does not have single user message of type Plain. {:#?}", + request + ); + }; + content } - async fn make_test_zeta( - project: &Entity, + fn init_test( cx: &mut TestAppContext, ) -> ( Entity, - Arc>>, - Arc>, + mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, ) { - let default_response = indoc! {" - ```main.rs - <|start_of_file|> - <|editable_region_start|> - hello world - <|editable_region_end|> - ```" - }; - let captured_request: Arc>> = Arc::new(Mutex::new(None)); - let completion_response: Arc> = - Arc::new(Mutex::new(default_response.to_string())); - let http_client = FakeHttpClient::create({ - let captured_request = captured_request.clone(); - let completion_response = completion_response.clone(); - move |req| { - let captured_request = captured_request.clone(); - let completion_response = completion_response.clone(); - async move { - match (req.method(), req.uri().path()) { - (&Method::POST, "/client/llm_tokens") => { - Ok(http_client::Response::builder() - .status(200) - .body( - serde_json::to_string(&CreateLlmTokenResponse { - token: LlmToken("the-llm-token".to_string()), - }) - .unwrap() - .into(), - ) - .unwrap()) - } - (&Method::POST, "/predict_edits/v2") => { - let mut request_body = String::new(); - req.into_body().read_to_string(&mut request_body).await?; - *captured_request.lock() = - Some(serde_json::from_str(&request_body).unwrap()); - Ok(http_client::Response::builder() - .status(200) - .body( - serde_json::to_string(&PredictEditsResponse { - request_id: Uuid::new_v4().to_string(), - output_excerpt: completion_response.lock().clone(), - }) - .unwrap() - .into(), - ) - 
.unwrap()) - } - _ => Ok(http_client::Response::builder() - .status(404) - .body("Not Found".into()) - .unwrap()), + cx.update(move |cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + zlog::init_test(); + + let (req_tx, req_rx) = mpsc::unbounded(); + + let http_client = FakeHttpClient::create({ + move |req| { + let uri = req.uri().path().to_string(); + let mut body = req.into_body(); + let req_tx = req_tx.clone(); + async move { + let resp = match uri.as_str() { + "/client/llm_tokens" => serde_json::to_string(&json!({ + "token": "test" + })) + .unwrap(), + "/predict_edits/raw" => { + let mut buf = Vec::new(); + body.read_to_end(&mut buf).await.ok(); + let req = serde_json::from_slice(&buf).unwrap(); + + let (res_tx, res_rx) = oneshot::channel(); + req_tx.unbounded_send((req, res_tx)).unwrap(); + serde_json::to_string(&res_rx.await?).unwrap() + } + _ => { + panic!("Unexpected path: {}", uri) + } + }; + + Ok(Response::builder().body(resp.into()).unwrap()) } } - } - }); - - let client = cx.update(|cx| Client::new(Arc::new(FakeSystemClock::new()), http_client, cx)); - cx.update(|cx| { - RefreshLlmTokenListener::register(client.clone(), cx); - }); - let _server = FakeServer::for_client(42, &client, cx).await; - - let zeta = cx.new(|cx| { - let mut zeta = Zeta::new(client, project.read(cx).user_store(), cx); - - let worktrees = project.read(cx).worktrees(cx).collect::>(); - for worktree in worktrees { - let worktree_id = worktree.read(cx).id(); - zeta.license_detection_watchers - .entry(worktree_id) - .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx))); - } - - zeta - }); + }); - (zeta, captured_request, completion_response) - } + let client = client::Client::new(Arc::new(FakeSystemClock::new()), http_client, cx); + client.cloud_client().set_credentials(1, "test".into()); - fn to_completion_edits( - iterator: impl IntoIterator, Arc)>, - buffer: &Entity, - cx: &App, - ) -> Vec<(Range, Arc)> { - let buffer = 
buffer.read(cx); - iterator - .into_iter() - .map(|(range, text)| { - ( - buffer.anchor_after(range.start)..buffer.anchor_before(range.end), - text, - ) - }) - .collect() - } + language_model::init(client.clone(), cx); - fn from_completion_edits( - editor_edits: &[(Range, Arc)], - buffer: &Entity, - cx: &App, - ) -> Vec<(Range, Arc)> { - let buffer = buffer.read(cx); - editor_edits - .iter() - .map(|(range, text)| { - ( - range.start.to_offset(buffer)..range.end.to_offset(buffer), - text.clone(), - ) - }) - .collect() - } + let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); + let zeta = Zeta::global(&client, &user_store, cx); - #[ctor::ctor] - fn init_logger() { - zlog::init_test(); + (zeta, req_rx) + }) } } diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a779cabeceac0bcb58340f7bbb98175409916e8 --- /dev/null +++ b/crates/zeta/src/zeta1.rs @@ -0,0 +1,500 @@ +mod input_excerpt; + +use std::{fmt::Write, ops::Range, path::Path, sync::Arc, time::Instant}; + +use crate::{ + EditPredictionId, ZedUpdateRequiredError, Zeta, + prediction::{EditPrediction, EditPredictionInputs}, +}; +use anyhow::{Context as _, Result}; +use cloud_llm_client::{ + PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, predict_edits_v3::Event, +}; +use gpui::{App, AppContext as _, AsyncApp, Context, Entity, SharedString, Task}; +use input_excerpt::excerpt_for_cursor_position; +use language::{ + Anchor, Buffer, BufferSnapshot, OffsetRangeExt as _, Point, ToPoint as _, text_diff, +}; +use project::{Project, ProjectPath}; +use release_channel::AppVersion; +use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; + +const CURSOR_MARKER: &str = "<|user_cursor_is_here|>"; +const START_OF_FILE_MARKER: &str = "<|start_of_file|>"; +const EDITABLE_REGION_START_MARKER: &str = "<|editable_region_start|>"; +const EDITABLE_REGION_END_MARKER: &str = 
"<|editable_region_end|>"; + +pub(crate) const MAX_CONTEXT_TOKENS: usize = 150; +pub(crate) const MAX_REWRITE_TOKENS: usize = 350; +pub(crate) const MAX_EVENT_TOKENS: usize = 500; + +pub(crate) fn request_prediction_with_zeta1( + zeta: &mut Zeta, + project: &Entity, + buffer: &Entity, + position: language::Anchor, + cx: &mut Context, +) -> Task>> { + let buffer = buffer.clone(); + let buffer_snapshotted_at = Instant::now(); + let snapshot = buffer.read(cx).snapshot(); + let client = zeta.client.clone(); + let llm_token = zeta.llm_token.clone(); + let app_version = AppVersion::global(cx); + + let zeta_project = zeta.get_or_init_zeta_project(project, cx); + let events = Arc::new(zeta_project.events(cx)); + + let (git_info, can_collect_file) = if let Some(file) = snapshot.file() { + let can_collect_file = zeta.can_collect_file(project, file, cx); + let git_info = if can_collect_file { + git_info_for_file(project, &ProjectPath::from_file(file.as_ref(), cx), cx) + } else { + None + }; + (git_info, can_collect_file) + } else { + (None, false) + }; + + let full_path: Arc = snapshot + .file() + .map(|f| Arc::from(f.full_path(cx).as_path())) + .unwrap_or_else(|| Arc::from(Path::new("untitled"))); + let full_path_str = full_path.to_string_lossy().into_owned(); + let cursor_point = position.to_point(&snapshot); + let prompt_for_events = { + let events = events.clone(); + move || prompt_for_events_impl(&events, MAX_EVENT_TOKENS) + }; + let gather_task = gather_context( + full_path_str, + &snapshot, + cursor_point, + prompt_for_events, + cx, + ); + + cx.spawn(async move |this, cx| { + let GatherContextOutput { + mut body, + context_range, + editable_range, + included_events_count, + } = gather_task.await?; + let done_gathering_context_at = Instant::now(); + + let included_events = &events[events.len() - included_events_count..events.len()]; + body.can_collect_data = can_collect_file + && this + .read_with(cx, |this, _| this.can_collect_events(included_events)) + 
.unwrap_or(false); + if body.can_collect_data { + body.git_info = git_info; + } + + log::debug!( + "Events:\n{}\nExcerpt:\n{:?}", + body.input_events, + body.input_excerpt + ); + + let http_client = client.http_client(); + + let response = Zeta::send_api_request::( + |request| { + let uri = if let Ok(predict_edits_url) = std::env::var("ZED_PREDICT_EDITS_URL") { + predict_edits_url + } else { + http_client + .build_zed_llm_url("/predict_edits/v2", &[])? + .as_str() + .into() + }; + Ok(request + .uri(uri) + .body(serde_json::to_string(&body)?.into())?) + }, + client, + llm_token, + app_version, + ) + .await; + + let inputs = EditPredictionInputs { + events: included_events.into(), + included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { + path: full_path.clone(), + max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), + excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { + start_line: cloud_llm_client::predict_edits_v3::Line(context_range.start.row), + text: snapshot + .text_for_range(context_range) + .collect::() + .into(), + }], + }], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + column: cursor_point.column, + line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), + }, + cursor_path: full_path, + }; + + // let response = perform_predict_edits(PerformPredictEditsParams { + // client, + // llm_token, + // app_version, + // body, + // }) + // .await; + + let (response, usage) = match response { + Ok(response) => response, + Err(err) => { + if err.is::() { + cx.update(|cx| { + this.update(cx, |zeta, _cx| { + zeta.update_required = true; + }) + .ok(); + + let error_message: SharedString = err.to_string().into(); + show_app_notification( + NotificationId::unique::(), + cx, + move |cx| { + cx.new(|cx| { + ErrorMessagePrompt::new(error_message.clone(), cx) + .with_link_button("Update Zed", "https://zed.dev/releases") + }) + }, + ); + }) + .ok(); + } + + return Err(err); + } + }; + + let 
received_response_at = Instant::now(); + log::debug!("completion response: {}", &response.output_excerpt); + + if let Some(usage) = usage { + this.update(cx, |this, cx| { + this.user_store.update(cx, |user_store, cx| { + user_store.update_edit_prediction_usage(usage, cx); + }); + }) + .ok(); + } + + let edit_prediction = process_completion_response( + response, + buffer, + &snapshot, + editable_range, + inputs, + buffer_snapshotted_at, + received_response_at, + cx, + ) + .await; + + let finished_at = Instant::now(); + + // record latency for ~1% of requests + if rand::random::() <= 2 { + telemetry::event!( + "Edit Prediction Request", + context_latency = done_gathering_context_at + .duration_since(buffer_snapshotted_at) + .as_millis(), + request_latency = received_response_at + .duration_since(done_gathering_context_at) + .as_millis(), + process_latency = finished_at.duration_since(received_response_at).as_millis() + ); + } + + edit_prediction + }) +} + +fn process_completion_response( + prediction_response: PredictEditsResponse, + buffer: Entity, + snapshot: &BufferSnapshot, + editable_range: Range, + inputs: EditPredictionInputs, + buffer_snapshotted_at: Instant, + received_response_at: Instant, + cx: &AsyncApp, +) -> Task>> { + let snapshot = snapshot.clone(); + let request_id = prediction_response.request_id; + let output_excerpt = prediction_response.output_excerpt; + cx.spawn(async move |cx| { + let output_excerpt: Arc = output_excerpt.into(); + + let edits: Arc<[(Range, Arc)]> = cx + .background_spawn({ + let output_excerpt = output_excerpt.clone(); + let editable_range = editable_range.clone(); + let snapshot = snapshot.clone(); + async move { parse_edits(output_excerpt, editable_range, &snapshot) } + }) + .await? 
+ .into(); + + Ok(EditPrediction::new( + EditPredictionId(request_id.into()), + &buffer, + &snapshot, + edits, + buffer_snapshotted_at, + received_response_at, + inputs, + cx, + ) + .await) + }) +} + +fn parse_edits( + output_excerpt: Arc, + editable_range: Range, + snapshot: &BufferSnapshot, +) -> Result, Arc)>> { + let content = output_excerpt.replace(CURSOR_MARKER, ""); + + let start_markers = content + .match_indices(EDITABLE_REGION_START_MARKER) + .collect::>(); + anyhow::ensure!( + start_markers.len() == 1, + "expected exactly one start marker, found {}", + start_markers.len() + ); + + let end_markers = content + .match_indices(EDITABLE_REGION_END_MARKER) + .collect::>(); + anyhow::ensure!( + end_markers.len() == 1, + "expected exactly one end marker, found {}", + end_markers.len() + ); + + let sof_markers = content + .match_indices(START_OF_FILE_MARKER) + .collect::>(); + anyhow::ensure!( + sof_markers.len() <= 1, + "expected at most one start-of-file marker, found {}", + sof_markers.len() + ); + + let codefence_start = start_markers[0].0; + let content = &content[codefence_start..]; + + let newline_ix = content.find('\n').context("could not find newline")?; + let content = &content[newline_ix + 1..]; + + let codefence_end = content + .rfind(&format!("\n{EDITABLE_REGION_END_MARKER}")) + .context("could not find end marker")?; + let new_text = &content[..codefence_end]; + + let old_text = snapshot + .text_for_range(editable_range.clone()) + .collect::(); + + Ok(compute_edits( + old_text, + new_text, + editable_range.start, + snapshot, + )) +} + +pub fn compute_edits( + old_text: String, + new_text: &str, + offset: usize, + snapshot: &BufferSnapshot, +) -> Vec<(Range, Arc)> { + text_diff(&old_text, new_text) + .into_iter() + .map(|(mut old_range, new_text)| { + old_range.start += offset; + old_range.end += offset; + + let prefix_len = common_prefix( + snapshot.chars_for_range(old_range.clone()), + new_text.chars(), + ); + old_range.start += prefix_len; + + let 
suffix_len = common_prefix( + snapshot.reversed_chars_for_range(old_range.clone()), + new_text[prefix_len..].chars().rev(), + ); + old_range.end = old_range.end.saturating_sub(suffix_len); + + let new_text = new_text[prefix_len..new_text.len() - suffix_len].into(); + let range = if old_range.is_empty() { + let anchor = snapshot.anchor_after(old_range.start); + anchor..anchor + } else { + snapshot.anchor_after(old_range.start)..snapshot.anchor_before(old_range.end) + }; + (range, new_text) + }) + .collect() +} + +fn common_prefix, T2: Iterator>(a: T1, b: T2) -> usize { + a.zip(b) + .take_while(|(a, b)| a == b) + .map(|(a, _)| a.len_utf8()) + .sum() +} + +fn git_info_for_file( + project: &Entity, + project_path: &ProjectPath, + cx: &App, +) -> Option { + let git_store = project.read(cx).git_store().read(cx); + if let Some((repository, _repo_path)) = + git_store.repository_and_path_for_project_path(project_path, cx) + { + let repository = repository.read(cx); + let head_sha = repository + .head_commit + .as_ref() + .map(|head_commit| head_commit.sha.to_string()); + let remote_origin_url = repository.remote_origin_url.clone(); + let remote_upstream_url = repository.remote_upstream_url.clone(); + if head_sha.is_none() && remote_origin_url.is_none() && remote_upstream_url.is_none() { + return None; + } + Some(PredictEditsGitInfo { + head_sha, + remote_origin_url, + remote_upstream_url, + }) + } else { + None + } +} + +pub struct GatherContextOutput { + pub body: PredictEditsBody, + pub context_range: Range, + pub editable_range: Range, + pub included_events_count: usize, +} + +pub fn gather_context( + full_path_str: String, + snapshot: &BufferSnapshot, + cursor_point: language::Point, + prompt_for_events: impl FnOnce() -> (String, usize) + Send + 'static, + cx: &App, +) -> Task> { + cx.background_spawn({ + let snapshot = snapshot.clone(); + async move { + let input_excerpt = excerpt_for_cursor_position( + cursor_point, + &full_path_str, + &snapshot, + MAX_REWRITE_TOKENS, 
+ MAX_CONTEXT_TOKENS, + ); + let (input_events, included_events_count) = prompt_for_events(); + let editable_range = input_excerpt.editable_range.to_offset(&snapshot); + + let body = PredictEditsBody { + input_events, + input_excerpt: input_excerpt.prompt, + can_collect_data: false, + diagnostic_groups: None, + git_info: None, + outline: None, + speculated_output: None, + }; + + Ok(GatherContextOutput { + body, + context_range: input_excerpt.context_range, + editable_range, + included_events_count, + }) + } + }) +} + +fn prompt_for_events_impl(events: &[Arc], mut remaining_tokens: usize) -> (String, usize) { + let mut result = String::new(); + for (ix, event) in events.iter().rev().enumerate() { + let event_string = format_event(event.as_ref()); + let event_tokens = guess_token_count(event_string.len()); + if event_tokens > remaining_tokens { + return (result, ix); + } + + if !result.is_empty() { + result.insert_str(0, "\n\n"); + } + result.insert_str(0, &event_string); + remaining_tokens -= event_tokens; + } + return (result, events.len()); +} + +pub fn format_event(event: &Event) -> String { + match event { + Event::BufferChange { + path, + old_path, + diff, + .. + } => { + let mut prompt = String::new(); + + if old_path != path { + writeln!( + prompt, + "User renamed {} to {}\n", + old_path.display(), + path.display() + ) + .unwrap(); + } + + if !diff.is_empty() { + write!( + prompt, + "User edited {}:\n```diff\n{}\n```", + path.display(), + diff + ) + .unwrap(); + } + + prompt + } + } +} + +/// Typical number of string bytes per token for the purposes of limiting model input. This is +/// intentionally low to err on the side of underestimating limits. 
+pub(crate) const BYTES_PER_TOKEN_GUESS: usize = 3; + +fn guess_token_count(bytes: usize) -> usize { + bytes / BYTES_PER_TOKEN_GUESS +} diff --git a/crates/zeta/src/input_excerpt.rs b/crates/zeta/src/zeta1/input_excerpt.rs similarity index 98% rename from crates/zeta/src/input_excerpt.rs rename to crates/zeta/src/zeta1/input_excerpt.rs index 06bff5b1bea0f099b2ccd98605ac5de5bb5e6360..853d74da463c19de4f1d3915cb703a53b6c43c61 100644 --- a/crates/zeta/src/input_excerpt.rs +++ b/crates/zeta/src/zeta1/input_excerpt.rs @@ -1,4 +1,4 @@ -use crate::{ +use super::{ CURSOR_MARKER, EDITABLE_REGION_END_MARKER, EDITABLE_REGION_START_MARKER, START_OF_FILE_MARKER, guess_token_count, }; @@ -7,6 +7,7 @@ use std::{fmt::Write, ops::Range}; #[derive(Debug)] pub struct InputExcerpt { + pub context_range: Range, pub editable_range: Range, pub prompt: String, } @@ -63,6 +64,7 @@ pub fn excerpt_for_cursor_position( write!(prompt, "\n```").unwrap(); InputExcerpt { + context_range, editable_range, prompt, } @@ -124,7 +126,7 @@ mod tests { use super::*; use gpui::{App, AppContext}; use indoc::indoc; - use language::{Buffer, Language, LanguageConfig, LanguageMatcher}; + use language::{Buffer, Language, LanguageConfig, LanguageMatcher, tree_sitter_rust}; use std::sync::Arc; #[gpui::test] diff --git a/crates/zeta/src/zeta_tests.rs b/crates/zeta/src/zeta_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..eb12f81af25d72b5e7003187ab0a9536622c9a74 --- /dev/null +++ b/crates/zeta/src/zeta_tests.rs @@ -0,0 +1,671 @@ +use client::test::FakeServer; +use clock::{FakeSystemClock, ReplicaId}; +use cloud_api_types::{CreateLlmTokenResponse, LlmToken}; +use cloud_llm_client::{PredictEditsBody, PredictEditsResponse}; +use gpui::TestAppContext; +use http_client::FakeHttpClient; +use indoc::indoc; +use language::Point; +use parking_lot::Mutex; +use serde_json::json; +use settings::SettingsStore; +use util::{path, rel_path::rel_path}; + +use crate::zeta1::MAX_EVENT_TOKENS; + +use 
super::*; + +const BSD_0_TXT: &str = include_str!("../license_examples/0bsd.txt"); + +#[gpui::test] +async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) { + let buffer = cx.new(|cx| Buffer::local("Lorem ipsum dolor", cx)); + let edits: Arc<[(Range, Arc)]> = cx.update(|cx| { + to_completion_edits([(2..5, "REM".into()), (9..11, "".into())], &buffer, cx).into() + }); + + let edit_preview = cx + .read(|cx| buffer.read(cx).preview_edits(edits.clone(), cx)) + .await; + + let completion = EditPrediction { + edits, + edit_preview, + buffer: buffer.clone(), + snapshot: cx.read(|cx| buffer.read(cx).snapshot()), + id: EditPredictionId("the-id".into()), + inputs: EditPredictionInputs { + events: Default::default(), + included_files: Default::default(), + cursor_point: cloud_llm_client::predict_edits_v3::Point { + line: Line(0), + column: 0, + }, + cursor_path: Path::new("").into(), + }, + buffer_snapshotted_at: Instant::now(), + response_received_at: Instant::now(), + }; + + cx.update(|cx| { + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(2..5, "REM".into()), (9..11, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(2..2, "REM".into()), (6..8, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.undo(cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(2..5, "REM".into()), (9..11, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "R")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(3..3, "EM".into()), (7..9, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(3..3, "E")], None, 
cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(4..4, "M".into()), (8..10, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(4..4, "M")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(9..11, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(4..5, "")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(4..4, "M".into()), (8..10, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(8..10, "")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(4..4, "M".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(4..6, "")], None, cx)); + assert_eq!(completion.interpolate(&buffer.read(cx).snapshot()), None); + }) +} + +#[gpui::test] +async fn test_clean_up_diff(cx: &mut TestAppContext) { + init_test(cx); + + assert_eq!( + apply_edit_prediction( + indoc! {" + fn main() { + let word_1 = \"lorem\"; + let range = word.len()..word.len(); + } + "}, + indoc! {" + <|editable_region_start|> + fn main() { + let word_1 = \"lorem\"; + let range = word_1.len()..word_1.len(); + } + + <|editable_region_end|> + "}, + cx, + ) + .await, + indoc! {" + fn main() { + let word_1 = \"lorem\"; + let range = word_1.len()..word_1.len(); + } + "}, + ); + + assert_eq!( + apply_edit_prediction( + indoc! {" + fn main() { + let story = \"the quick\" + } + "}, + indoc! {" + <|editable_region_start|> + fn main() { + let story = \"the quick brown fox jumps over the lazy dog\"; + } + + <|editable_region_end|> + "}, + cx, + ) + .await, + indoc! 
{" + fn main() { + let story = \"the quick brown fox jumps over the lazy dog\"; + } + "}, + ); +} + +#[gpui::test] +async fn test_edit_prediction_end_of_buffer(cx: &mut TestAppContext) { + init_test(cx); + + let buffer_content = "lorem\n"; + let completion_response = indoc! {" + ```animals.js + <|start_of_file|> + <|editable_region_start|> + lorem + ipsum + <|editable_region_end|> + ```"}; + + assert_eq!( + apply_edit_prediction(buffer_content, completion_response, cx).await, + "lorem\nipsum" + ); +} + +#[gpui::test] +async fn test_can_collect_data(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree(path!("/project"), json!({ "LICENSE": BSD_0_TXT })) + .await; + + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/project/src/main.rs"), cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); + + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Disabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_remote_file(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + let project = Project::test(fs.clone(), [], cx).await; + + let buffer = cx.new(|_cx| { + Buffer::remote( + language::BufferId::new(1).unwrap(), + ReplicaId::new(1), + language::Capability::ReadWrite, + "fn main() {\n println!(\"Hello\");\n}", + ) + }); + + let (zeta, captured_request, _) = 
make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_private_file(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/project"), + json!({ + "LICENSE": BSD_0_TXT, + ".env": "SECRET_KEY=secret" + }), + ) + .await; + + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer("/project/.env", cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_untitled_buffer(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + let project = Project::test(fs.clone(), [], cx).await; + let buffer = cx.new(|cx| Buffer::local("", cx)); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_when_closed_source(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree(path!("/project"), json!({ "main.rs": "fn main() {}" })) + .await; + + let project = 
Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer("/project/main.rs", cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_data_collection_status_changes_on_move(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/open_source_worktree"), + json!({ "LICENSE": BSD_0_TXT, "main.rs": "" }), + ) + .await; + fs.insert_tree(path!("/closed_source_worktree"), json!({ "main.rs": "" })) + .await; + + let project = Project::test( + fs.clone(), + [ + path!("/open_source_worktree").as_ref(), + path!("/closed_source_worktree").as_ref(), + ], + cx, + ) + .await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/open_source_worktree/main.rs"), cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); + + let closed_source_file = project + .update(cx, |project, cx| { + let worktree2 = project + .worktree_for_root_name("closed_source_worktree", cx) + .unwrap(); + worktree2.update(cx, |worktree2, cx| { + worktree2.load_file(rel_path("main.rs"), cx) + }) + }) + .await + .unwrap() + .file; + + buffer.update(cx, |buffer, cx| { + buffer.file_updated(closed_source_file, cx); + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + 
captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_events_in_uncollectable_buffers(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/worktree1"), + json!({ "LICENSE": BSD_0_TXT, "main.rs": "", "other.rs": "" }), + ) + .await; + fs.insert_tree(path!("/worktree2"), json!({ "private.rs": "" })) + .await; + + let project = Project::test( + fs.clone(), + [path!("/worktree1").as_ref(), path!("/worktree2").as_ref()], + cx, + ) + .await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/worktree1/main.rs"), cx) + }) + .await + .unwrap(); + let private_buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/worktree2/file.rs"), cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); + + // this has a side effect of registering the buffer to watch for edits + run_edit_prediction(&private_buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); + + private_buffer.update(cx, |private_buffer, cx| { + private_buffer.edit([(0..0, "An edit for the history!")], None, cx); + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); + + // make an edit that uses too many bytes, causing private_buffer edit to not be able to be + // included + buffer.update(cx, |buffer, cx| { + buffer.edit( + [( + 0..0, + " ".repeat(MAX_EVENT_TOKENS * zeta1::BYTES_PER_TOKEN_GUESS), + )], + None, + cx, + ); + }); + + 
run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); +} + +fn init_test(cx: &mut TestAppContext) { + cx.update(|cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + }); +} + +async fn apply_edit_prediction( + buffer_content: &str, + completion_response: &str, + cx: &mut TestAppContext, +) -> String { + let fs = project::FakeFs::new(cx.executor()); + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = cx.new(|cx| Buffer::local(buffer_content, cx)); + let (zeta, _, response) = make_test_zeta(&project, cx).await; + *response.lock() = completion_response.to_string(); + let edit_prediction = run_edit_prediction(&buffer, &project, &zeta, cx).await; + buffer.update(cx, |buffer, cx| { + buffer.edit(edit_prediction.edits.iter().cloned(), None, cx) + }); + buffer.read_with(cx, |buffer, _| buffer.text()) +} + +async fn run_edit_prediction( + buffer: &Entity, + project: &Entity, + zeta: &Entity, + cx: &mut TestAppContext, +) -> EditPrediction { + let cursor = buffer.read_with(cx, |buffer, _| buffer.anchor_before(Point::new(1, 0))); + zeta.update(cx, |zeta, cx| zeta.register_buffer(buffer, &project, cx)); + cx.background_executor.run_until_parked(); + let prediction_task = zeta.update(cx, |zeta, cx| { + zeta.request_prediction(&project, buffer, cursor, cx) + }); + prediction_task.await.unwrap().unwrap() +} + +async fn make_test_zeta( + project: &Entity, + cx: &mut TestAppContext, +) -> ( + Entity, + Arc>>, + Arc>, +) { + let default_response = indoc! 
{" + ```main.rs + <|start_of_file|> + <|editable_region_start|> + hello world + <|editable_region_end|> + ```" + }; + let captured_request: Arc>> = Arc::new(Mutex::new(None)); + let completion_response: Arc> = + Arc::new(Mutex::new(default_response.to_string())); + let http_client = FakeHttpClient::create({ + let captured_request = captured_request.clone(); + let completion_response = completion_response.clone(); + let mut next_request_id = 0; + move |req| { + let captured_request = captured_request.clone(); + let completion_response = completion_response.clone(); + async move { + match (req.method(), req.uri().path()) { + (&Method::POST, "/client/llm_tokens") => Ok(http_client::Response::builder() + .status(200) + .body( + serde_json::to_string(&CreateLlmTokenResponse { + token: LlmToken("the-llm-token".to_string()), + }) + .unwrap() + .into(), + ) + .unwrap()), + (&Method::POST, "/predict_edits/v2") => { + let mut request_body = String::new(); + req.into_body().read_to_string(&mut request_body).await?; + *captured_request.lock() = + Some(serde_json::from_str(&request_body).unwrap()); + next_request_id += 1; + Ok(http_client::Response::builder() + .status(200) + .body( + serde_json::to_string(&PredictEditsResponse { + request_id: format!("request-{next_request_id}"), + output_excerpt: completion_response.lock().clone(), + }) + .unwrap() + .into(), + ) + .unwrap()) + } + _ => Ok(http_client::Response::builder() + .status(404) + .body("Not Found".into()) + .unwrap()), + } + } + } + }); + + let client = cx.update(|cx| Client::new(Arc::new(FakeSystemClock::new()), http_client, cx)); + cx.update(|cx| { + RefreshLlmTokenListener::register(client.clone(), cx); + }); + let _server = FakeServer::for_client(42, &client, cx).await; + + let zeta = cx.new(|cx| { + let mut zeta = Zeta::new(client, project.read(cx).user_store(), cx); + zeta.set_edit_prediction_model(ZetaEditPredictionModel::Zeta1); + + let worktrees = project.read(cx).worktrees(cx).collect::>(); + for worktree 
in worktrees { + let worktree_id = worktree.read(cx).id(); + zeta.get_or_init_zeta_project(project, cx) + .license_detection_watchers + .entry(worktree_id) + .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx))); + } + + zeta + }); + + (zeta, captured_request, completion_response) +} + +fn to_completion_edits( + iterator: impl IntoIterator, Arc)>, + buffer: &Entity, + cx: &App, +) -> Vec<(Range, Arc)> { + let buffer = buffer.read(cx); + iterator + .into_iter() + .map(|(range, text)| { + ( + buffer.anchor_after(range.start)..buffer.anchor_before(range.end), + text, + ) + }) + .collect() +} + +fn from_completion_edits( + editor_edits: &[(Range, Arc)], + buffer: &Entity, + cx: &App, +) -> Vec<(Range, Arc)> { + let buffer = buffer.read(cx); + editor_edits + .iter() + .map(|(range, text)| { + ( + range.start.to_offset(buffer)..range.end.to_offset(buffer), + text.clone(), + ) + }) + .collect() +} + +#[ctor::ctor] +fn init_logger() { + zlog::init_test(); +} diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml deleted file mode 100644 index 0b20f980feaa6c2e86b0d3a6b88150d27d06fab2..0000000000000000000000000000000000000000 --- a/crates/zeta2/Cargo.toml +++ /dev/null @@ -1,61 +0,0 @@ -[package] -name = "zeta2" -version = "0.1.0" -edition.workspace = true -publish.workspace = true -license = "GPL-3.0-or-later" - -[lints] -workspace = true - -[lib] -path = "src/zeta2.rs" - -[features] -eval-support = [] - -[dependencies] -anyhow.workspace = true -arrayvec.workspace = true -brotli.workspace = true -chrono.workspace = true -client.workspace = true -cloud_llm_client.workspace = true -cloud_zeta2_prompt.workspace = true -collections.workspace = true -edit_prediction.workspace = true -edit_prediction_context.workspace = true -feature_flags.workspace = true -futures.workspace = true -gpui.workspace = true -indoc.workspace = true -language.workspace = true -language_model.workspace = true -log.workspace = true -lsp.workspace = true -open_ai.workspace = 
true -pretty_assertions.workspace = true -project.workspace = true -release_channel.workspace = true -semver.workspace = true -serde.workspace = true -serde_json.workspace = true -smol.workspace = true -strsim.workspace = true -thiserror.workspace = true -util.workspace = true -uuid.workspace = true -workspace.workspace = true -worktree.workspace = true - -[dev-dependencies] -clock = { workspace = true, features = ["test-support"] } -cloud_llm_client = { workspace = true, features = ["test-support"] } -gpui = { workspace = true, features = ["test-support"] } -lsp.workspace = true -indoc.workspace = true -language = { workspace = true, features = ["test-support"] } -language_model = { workspace = true, features = ["test-support"] } -project = { workspace = true, features = ["test-support"] } -settings = { workspace = true, features = ["test-support"] } -zlog.workspace = true diff --git a/crates/zeta2/LICENSE-GPL b/crates/zeta2/LICENSE-GPL deleted file mode 120000 index 89e542f750cd3860a0598eff0dc34b56d7336dc4..0000000000000000000000000000000000000000 --- a/crates/zeta2/LICENSE-GPL +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE-GPL \ No newline at end of file diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs deleted file mode 100644 index 255b294d7cc25fade197c3a50d39130bc6bb99c5..0000000000000000000000000000000000000000 --- a/crates/zeta2/src/zeta2.rs +++ /dev/null @@ -1,2968 +0,0 @@ -use anyhow::{Context as _, Result, anyhow, bail}; -use arrayvec::ArrayVec; -use chrono::TimeDelta; -use client::{Client, EditPredictionUsage, UserStore}; -use cloud_llm_client::predict_edits_v3::{self, PromptFormat, Signature}; -use cloud_llm_client::{ - AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, MINIMUM_REQUIRED_VERSION_HEADER_NAME, - ZED_VERSION_HEADER_NAME, -}; -use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; -use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; -use collections::HashMap; -use 
edit_prediction_context::{ - DeclarationId, DeclarationStyle, EditPredictionContext, EditPredictionContextOptions, - EditPredictionExcerpt, EditPredictionExcerptOptions, EditPredictionScoreOptions, Line, - SyntaxIndex, SyntaxIndexState, -}; -use feature_flags::{FeatureFlag, FeatureFlagAppExt as _}; -use futures::AsyncReadExt as _; -use futures::channel::{mpsc, oneshot}; -use gpui::http_client::{AsyncBody, Method}; -use gpui::{ - App, AsyncApp, Entity, EntityId, Global, SharedString, Subscription, Task, WeakEntity, - http_client, prelude::*, -}; -use language::{Anchor, Buffer, DiagnosticSet, LanguageServerId, Point, ToOffset as _, ToPoint}; -use language::{BufferSnapshot, OffsetRangeExt}; -use language_model::{LlmApiToken, RefreshLlmTokenListener}; -use lsp::DiagnosticSeverity; -use open_ai::FunctionDefinition; -use project::{Project, ProjectPath}; -use release_channel::AppVersion; -use semver::Version; -use serde::de::DeserializeOwned; -use std::collections::{VecDeque, hash_map}; - -use std::fmt::Write; -use std::ops::Range; -use std::path::Path; -use std::str::FromStr; -use std::sync::{Arc, LazyLock}; -use std::time::{Duration, Instant}; -use std::{env, mem}; -use thiserror::Error; -use util::rel_path::RelPathBuf; -use util::{LogErrorFuture, RangeExt as _, ResultExt as _, TryFutureExt}; -use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; - -pub mod assemble_excerpts; -mod prediction; -mod provider; -pub mod retrieval_search; -mod sweep_ai; -pub mod udiff; -mod xml_edits; - -use crate::assemble_excerpts::assemble_excerpts; -pub use crate::prediction::EditPrediction; -pub use crate::prediction::EditPredictionId; -pub use provider::ZetaEditPredictionProvider; - -/// Maximum number of events to track. 
-const EVENT_COUNT_MAX_SWEEP: usize = 6; -const EVENT_COUNT_MAX_ZETA: usize = 16; -const CHANGE_GROUPING_LINE_SPAN: u32 = 8; - -pub struct SweepFeatureFlag; - -impl FeatureFlag for SweepFeatureFlag { - const NAME: &str = "sweep-ai"; -} -pub const DEFAULT_EXCERPT_OPTIONS: EditPredictionExcerptOptions = EditPredictionExcerptOptions { - max_bytes: 512, - min_bytes: 128, - target_before_cursor_over_total_bytes: 0.5, -}; - -pub const DEFAULT_CONTEXT_OPTIONS: ContextMode = - ContextMode::Agentic(DEFAULT_AGENTIC_CONTEXT_OPTIONS); - -pub const DEFAULT_AGENTIC_CONTEXT_OPTIONS: AgenticContextOptions = AgenticContextOptions { - excerpt: DEFAULT_EXCERPT_OPTIONS, -}; - -pub const DEFAULT_SYNTAX_CONTEXT_OPTIONS: EditPredictionContextOptions = - EditPredictionContextOptions { - use_imports: true, - max_retrieved_declarations: 0, - excerpt: DEFAULT_EXCERPT_OPTIONS, - score: EditPredictionScoreOptions { - omit_excerpt_overlaps: true, - }, - }; - -pub const DEFAULT_OPTIONS: ZetaOptions = ZetaOptions { - context: DEFAULT_CONTEXT_OPTIONS, - max_prompt_bytes: DEFAULT_MAX_PROMPT_BYTES, - max_diagnostic_bytes: 2048, - prompt_format: PromptFormat::DEFAULT, - file_indexing_parallelism: 1, - buffer_change_grouping_interval: Duration::from_secs(1), -}; - -static USE_OLLAMA: LazyLock = - LazyLock::new(|| env::var("ZED_ZETA2_OLLAMA").is_ok_and(|var| !var.is_empty())); -static CONTEXT_RETRIEVAL_MODEL_ID: LazyLock = LazyLock::new(|| { - env::var("ZED_ZETA2_CONTEXT_MODEL").unwrap_or(if *USE_OLLAMA { - "qwen3-coder:30b".to_string() - } else { - "yqvev8r3".to_string() - }) -}); -static EDIT_PREDICTIONS_MODEL_ID: LazyLock = LazyLock::new(|| { - match env::var("ZED_ZETA2_MODEL").as_deref() { - Ok("zeta2-exp") => "4w5n28vw", // Fine-tuned model @ Baseten - Ok(model) => model, - Err(_) if *USE_OLLAMA => "qwen3-coder:30b", - Err(_) => "yqvev8r3", // Vanilla qwen3-coder @ Baseten - } - .to_string() -}); -static PREDICT_EDITS_URL: LazyLock> = LazyLock::new(|| { - 
env::var("ZED_PREDICT_EDITS_URL").ok().or_else(|| { - if *USE_OLLAMA { - Some("http://localhost:11434/v1/chat/completions".into()) - } else { - None - } - }) -}); - -pub struct Zeta2FeatureFlag; - -impl FeatureFlag for Zeta2FeatureFlag { - const NAME: &'static str = "zeta2"; - - fn enabled_for_staff() -> bool { - false - } -} - -#[derive(Clone)] -struct ZetaGlobal(Entity); - -impl Global for ZetaGlobal {} - -pub struct Zeta { - client: Arc, - user_store: Entity, - llm_token: LlmApiToken, - _llm_token_subscription: Subscription, - projects: HashMap, - options: ZetaOptions, - update_required: bool, - debug_tx: Option>, - #[cfg(feature = "eval-support")] - eval_cache: Option>, - edit_prediction_model: ZetaEditPredictionModel, - sweep_api_token: Option, - sweep_ai_debug_info: Arc, -} - -#[derive(PartialEq, Eq)] -pub enum ZetaEditPredictionModel { - ZedCloud, - Sweep, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct ZetaOptions { - pub context: ContextMode, - pub max_prompt_bytes: usize, - pub max_diagnostic_bytes: usize, - pub prompt_format: predict_edits_v3::PromptFormat, - pub file_indexing_parallelism: usize, - pub buffer_change_grouping_interval: Duration, -} - -#[derive(Debug, Clone, PartialEq)] -pub enum ContextMode { - Agentic(AgenticContextOptions), - Syntax(EditPredictionContextOptions), -} - -#[derive(Debug, Clone, PartialEq)] -pub struct AgenticContextOptions { - pub excerpt: EditPredictionExcerptOptions, -} - -impl ContextMode { - pub fn excerpt(&self) -> &EditPredictionExcerptOptions { - match self { - ContextMode::Agentic(options) => &options.excerpt, - ContextMode::Syntax(options) => &options.excerpt, - } - } -} - -#[derive(Debug)] -pub enum ZetaDebugInfo { - ContextRetrievalStarted(ZetaContextRetrievalStartedDebugInfo), - SearchQueriesGenerated(ZetaSearchQueryDebugInfo), - SearchQueriesExecuted(ZetaContextRetrievalDebugInfo), - ContextRetrievalFinished(ZetaContextRetrievalDebugInfo), - EditPredictionRequested(ZetaEditPredictionDebugInfo), -} - 
-#[derive(Debug)] -pub struct ZetaContextRetrievalStartedDebugInfo { - pub project: Entity, - pub timestamp: Instant, - pub search_prompt: String, -} - -#[derive(Debug)] -pub struct ZetaContextRetrievalDebugInfo { - pub project: Entity, - pub timestamp: Instant, -} - -#[derive(Debug)] -pub struct ZetaEditPredictionDebugInfo { - pub request: predict_edits_v3::PredictEditsRequest, - pub retrieval_time: TimeDelta, - pub buffer: WeakEntity, - pub position: language::Anchor, - pub local_prompt: Result, - pub response_rx: oneshot::Receiver<(Result, TimeDelta)>, -} - -#[derive(Debug)] -pub struct ZetaSearchQueryDebugInfo { - pub project: Entity, - pub timestamp: Instant, - pub search_queries: Vec, -} - -pub type RequestDebugInfo = predict_edits_v3::DebugInfo; - -struct ZetaProject { - syntax_index: Option>, - events: VecDeque, - recent_paths: VecDeque, - registered_buffers: HashMap, - current_prediction: Option, - next_pending_prediction_id: usize, - pending_predictions: ArrayVec, - last_prediction_refresh: Option<(EntityId, Instant)>, - context: Option, Vec>>>, - refresh_context_task: Option>>>, - refresh_context_debounce_task: Option>>, - refresh_context_timestamp: Option, - _subscription: gpui::Subscription, -} - -#[derive(Debug, Clone)] -struct CurrentEditPrediction { - pub requested_by: PredictionRequestedBy, - pub prediction: EditPrediction, -} - -impl CurrentEditPrediction { - fn should_replace_prediction(&self, old_prediction: &Self, cx: &App) -> bool { - let Some(new_edits) = self - .prediction - .interpolate(&self.prediction.buffer.read(cx)) - else { - return false; - }; - - if self.prediction.buffer != old_prediction.prediction.buffer { - return true; - } - - let Some(old_edits) = old_prediction - .prediction - .interpolate(&old_prediction.prediction.buffer.read(cx)) - else { - return true; - }; - - let requested_by_buffer_id = self.requested_by.buffer_id(); - - // This reduces the occurrence of UI thrash from replacing edits - // - // TODO: This is fairly 
arbitrary - should have a more general heuristic that handles multiple edits. - if requested_by_buffer_id == Some(self.prediction.buffer.entity_id()) - && requested_by_buffer_id == Some(old_prediction.prediction.buffer.entity_id()) - && old_edits.len() == 1 - && new_edits.len() == 1 - { - let (old_range, old_text) = &old_edits[0]; - let (new_range, new_text) = &new_edits[0]; - new_range == old_range && new_text.starts_with(old_text.as_ref()) - } else { - true - } - } -} - -#[derive(Debug, Clone)] -enum PredictionRequestedBy { - DiagnosticsUpdate, - Buffer(EntityId), -} - -impl PredictionRequestedBy { - pub fn buffer_id(&self) -> Option { - match self { - PredictionRequestedBy::DiagnosticsUpdate => None, - PredictionRequestedBy::Buffer(buffer_id) => Some(*buffer_id), - } - } -} - -struct PendingPrediction { - id: usize, - _task: Task<()>, -} - -/// A prediction from the perspective of a buffer. -#[derive(Debug)] -enum BufferEditPrediction<'a> { - Local { prediction: &'a EditPrediction }, - Jump { prediction: &'a EditPrediction }, -} - -struct RegisteredBuffer { - snapshot: BufferSnapshot, - _subscriptions: [gpui::Subscription; 2], -} - -#[derive(Clone)] -pub enum Event { - BufferChange { - old_snapshot: BufferSnapshot, - new_snapshot: BufferSnapshot, - end_edit_anchor: Option, - timestamp: Instant, - }, -} - -impl Event { - pub fn to_request_event(&self, cx: &App) -> Option { - match self { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. - } => { - let path = new_snapshot.file().map(|f| f.full_path(cx)); - - let old_path = old_snapshot.file().and_then(|f| { - let old_path = f.full_path(cx); - if Some(&old_path) != path.as_ref() { - Some(old_path) - } else { - None - } - }); - - // TODO [zeta2] move to bg? 
- let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); - - if path == old_path && diff.is_empty() { - None - } else { - Some(predict_edits_v3::Event::BufferChange { - old_path, - path, - diff, - //todo: Actually detect if this edit was predicted or not - predicted: false, - }) - } - } - } - } - - pub fn project_path(&self, cx: &App) -> Option { - match self { - Event::BufferChange { new_snapshot, .. } => new_snapshot - .file() - .map(|f| project::ProjectPath::from_file(f.as_ref(), cx)), - } - } -} - -impl Zeta { - pub fn try_global(cx: &App) -> Option> { - cx.try_global::().map(|global| global.0.clone()) - } - - pub fn global( - client: &Arc, - user_store: &Entity, - cx: &mut App, - ) -> Entity { - cx.try_global::() - .map(|global| global.0.clone()) - .unwrap_or_else(|| { - let zeta = cx.new(|cx| Self::new(client.clone(), user_store.clone(), cx)); - cx.set_global(ZetaGlobal(zeta.clone())); - zeta - }) - } - - pub fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self { - let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx); - - Self { - projects: HashMap::default(), - client, - user_store, - options: DEFAULT_OPTIONS, - llm_token: LlmApiToken::default(), - _llm_token_subscription: cx.subscribe( - &refresh_llm_token_listener, - |this, _listener, _event, cx| { - let client = this.client.clone(); - let llm_token = this.llm_token.clone(); - cx.spawn(async move |_this, _cx| { - llm_token.refresh(&client).await?; - anyhow::Ok(()) - }) - .detach_and_log_err(cx); - }, - ), - update_required: false, - debug_tx: None, - #[cfg(feature = "eval-support")] - eval_cache: None, - edit_prediction_model: ZetaEditPredictionModel::ZedCloud, - sweep_api_token: std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(), - sweep_ai_debug_info: sweep_ai::debug_info(cx), - } - } - - pub fn set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) { - self.edit_prediction_model = model; 
- } - - pub fn has_sweep_api_token(&self) -> bool { - self.sweep_api_token.is_some() - } - - #[cfg(feature = "eval-support")] - pub fn with_eval_cache(&mut self, cache: Arc) { - self.eval_cache = Some(cache); - } - - pub fn debug_info(&mut self) -> mpsc::UnboundedReceiver { - let (debug_watch_tx, debug_watch_rx) = mpsc::unbounded(); - self.debug_tx = Some(debug_watch_tx); - debug_watch_rx - } - - pub fn options(&self) -> &ZetaOptions { - &self.options - } - - pub fn set_options(&mut self, options: ZetaOptions) { - self.options = options; - } - - pub fn clear_history(&mut self) { - for zeta_project in self.projects.values_mut() { - zeta_project.events.clear(); - } - } - - pub fn history_for_project( - &self, - project: &Entity, - ) -> impl DoubleEndedIterator { - self.projects - .get(&project.entity_id()) - .map(|project| project.events.iter()) - .into_iter() - .flatten() - } - - pub fn context_for_project( - &self, - project: &Entity, - ) -> impl Iterator, &[Range])> { - self.projects - .get(&project.entity_id()) - .and_then(|project| { - Some( - project - .context - .as_ref()? 
- .iter() - .map(|(buffer, ranges)| (buffer.clone(), ranges.as_slice())), - ) - }) - .into_iter() - .flatten() - } - - pub fn usage(&self, cx: &App) -> Option { - if self.edit_prediction_model == ZetaEditPredictionModel::ZedCloud { - self.user_store.read(cx).edit_prediction_usage() - } else { - None - } - } - - pub fn register_project(&mut self, project: &Entity, cx: &mut Context) { - self.get_or_init_zeta_project(project, cx); - } - - pub fn register_buffer( - &mut self, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) { - let zeta_project = self.get_or_init_zeta_project(project, cx); - Self::register_buffer_impl(zeta_project, buffer, project, cx); - } - - fn get_or_init_zeta_project( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> &mut ZetaProject { - self.projects - .entry(project.entity_id()) - .or_insert_with(|| ZetaProject { - syntax_index: if let ContextMode::Syntax(_) = &self.options.context { - Some(cx.new(|cx| { - SyntaxIndex::new(project, self.options.file_indexing_parallelism, cx) - })) - } else { - None - }, - events: VecDeque::new(), - recent_paths: VecDeque::new(), - registered_buffers: HashMap::default(), - current_prediction: None, - pending_predictions: ArrayVec::new(), - next_pending_prediction_id: 0, - last_prediction_refresh: None, - context: None, - refresh_context_task: None, - refresh_context_debounce_task: None, - refresh_context_timestamp: None, - _subscription: cx.subscribe(&project, Self::handle_project_event), - }) - } - - fn handle_project_event( - &mut self, - project: Entity, - event: &project::Event, - cx: &mut Context, - ) { - // TODO [zeta2] init with recent paths - match event { - project::Event::ActiveEntryChanged(Some(active_entry_id)) => { - let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - let path = project.read(cx).path_for_entry(*active_entry_id, cx); - if let Some(path) = path { - if let Some(ix) = zeta_project - .recent_paths - .iter() - 
.position(|probe| probe == &path) - { - zeta_project.recent_paths.remove(ix); - } - zeta_project.recent_paths.push_front(path); - } - } - project::Event::DiagnosticsUpdated { .. } => { - self.refresh_prediction_from_diagnostics(project, cx); - } - _ => (), - } - } - - fn register_buffer_impl<'a>( - zeta_project: &'a mut ZetaProject, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) -> &'a mut RegisteredBuffer { - let buffer_id = buffer.entity_id(); - match zeta_project.registered_buffers.entry(buffer_id) { - hash_map::Entry::Occupied(entry) => entry.into_mut(), - hash_map::Entry::Vacant(entry) => { - let snapshot = buffer.read(cx).snapshot(); - let project_entity_id = project.entity_id(); - entry.insert(RegisteredBuffer { - snapshot, - _subscriptions: [ - cx.subscribe(buffer, { - let project = project.downgrade(); - move |this, buffer, event, cx| { - if let language::BufferEvent::Edited = event - && let Some(project) = project.upgrade() - { - this.report_changes_for_buffer(&buffer, &project, cx); - } - } - }), - cx.observe_release(buffer, move |this, _buffer, _cx| { - let Some(zeta_project) = this.projects.get_mut(&project_entity_id) - else { - return; - }; - zeta_project.registered_buffers.remove(&buffer_id); - }), - ], - }) - } - } - } - - fn report_changes_for_buffer( - &mut self, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) { - let event_count_max = match self.edit_prediction_model { - ZetaEditPredictionModel::ZedCloud => EVENT_COUNT_MAX_ZETA, - ZetaEditPredictionModel::Sweep => EVENT_COUNT_MAX_SWEEP, - }; - - let sweep_ai_project = self.get_or_init_zeta_project(project, cx); - let registered_buffer = Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); - - let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version == registered_buffer.snapshot.version { - return; - } - - let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - let end_edit_anchor = new_snapshot - 
.anchored_edits_since::(&old_snapshot.version) - .last() - .map(|(_, range)| range.end); - let events = &mut sweep_ai_project.events; - - if let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - end_edit_anchor: last_end_edit_anchor, - .. - }) = events.back_mut() - { - let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() - == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version; - - let should_coalesce = is_next_snapshot_of_same_buffer - && end_edit_anchor - .as_ref() - .zip(last_end_edit_anchor.as_ref()) - .is_some_and(|(a, b)| { - let a = a.to_point(&new_snapshot); - let b = b.to_point(&new_snapshot); - a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN - }); - - if should_coalesce { - *last_end_edit_anchor = end_edit_anchor; - *last_new_snapshot = new_snapshot; - return; - } - } - - if events.len() >= event_count_max { - events.pop_front(); - } - - events.push_back(Event::BufferChange { - old_snapshot, - new_snapshot, - end_edit_anchor, - timestamp: Instant::now(), - }); - } - - fn current_prediction_for_buffer( - &self, - buffer: &Entity, - project: &Entity, - cx: &App, - ) -> Option> { - let project_state = self.projects.get(&project.entity_id())?; - - let CurrentEditPrediction { - requested_by, - prediction, - } = project_state.current_prediction.as_ref()?; - - if prediction.targets_buffer(buffer.read(cx)) { - Some(BufferEditPrediction::Local { prediction }) - } else { - let show_jump = match requested_by { - PredictionRequestedBy::Buffer(requested_by_buffer_id) => { - requested_by_buffer_id == &buffer.entity_id() - } - PredictionRequestedBy::DiagnosticsUpdate => true, - }; - - if show_jump { - Some(BufferEditPrediction::Jump { prediction }) - } else { - None - } - } - } - - fn accept_current_prediction(&mut self, project: &Entity, cx: &mut Context) { - if self.edit_prediction_model != ZetaEditPredictionModel::ZedCloud { - return; - } - - let Some(project_state) = 
self.projects.get_mut(&project.entity_id()) else { - return; - }; - - let Some(prediction) = project_state.current_prediction.take() else { - return; - }; - let request_id = prediction.prediction.id.to_string(); - project_state.pending_predictions.clear(); - - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let app_version = AppVersion::global(cx); - cx.spawn(async move |this, cx| { - let url = if let Ok(predict_edits_url) = env::var("ZED_ACCEPT_PREDICTION_URL") { - http_client::Url::parse(&predict_edits_url)? - } else { - client - .http_client() - .build_zed_llm_url("/predict_edits/accept", &[])? - }; - - let response = cx - .background_spawn(Self::send_api_request::<()>( - move |builder| { - let req = builder.uri(url.as_ref()).body( - serde_json::to_string(&AcceptEditPredictionBody { - request_id: request_id.clone(), - })? - .into(), - ); - Ok(req?) - }, - client, - llm_token, - app_version, - )) - .await; - - Self::handle_api_response(&this, response, cx)?; - anyhow::Ok(()) - }) - .detach_and_log_err(cx); - } - - fn discard_current_prediction(&mut self, project: &Entity) { - if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { - project_state.current_prediction.take(); - project_state.pending_predictions.clear(); - }; - } - - fn is_refreshing(&self, project: &Entity) -> bool { - self.projects - .get(&project.entity_id()) - .is_some_and(|project_state| !project_state.pending_predictions.is_empty()) - } - - pub fn refresh_prediction_from_buffer( - &mut self, - project: Entity, - buffer: Entity, - position: language::Anchor, - cx: &mut Context, - ) { - self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { - let Some(request_task) = this - .update(cx, |this, cx| { - this.request_prediction(&project, &buffer, position, cx) - }) - .log_err() - else { - return Task::ready(anyhow::Ok(())); - }; - - let project = project.clone(); - cx.spawn(async move |cx| { - if let Some(prediction) = 
request_task.await? { - this.update(cx, |this, cx| { - let project_state = this - .projects - .get_mut(&project.entity_id()) - .context("Project not found")?; - - let new_prediction = CurrentEditPrediction { - requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), - prediction: prediction, - }; - - if project_state - .current_prediction - .as_ref() - .is_none_or(|old_prediction| { - new_prediction.should_replace_prediction(&old_prediction, cx) - }) - { - project_state.current_prediction = Some(new_prediction); - cx.notify(); - } - anyhow::Ok(()) - })??; - } - Ok(()) - }) - }) - } - - pub fn refresh_prediction_from_diagnostics( - &mut self, - project: Entity, - cx: &mut Context, - ) { - let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - - // Prefer predictions from buffer - if zeta_project.current_prediction.is_some() { - return; - }; - - self.queue_prediction_refresh(project.clone(), project.entity_id(), cx, move |this, cx| { - let Some(open_buffer_task) = project - .update(cx, |project, cx| { - project - .active_entry() - .and_then(|entry| project.path_for_entry(entry, cx)) - .map(|path| project.open_buffer(path, cx)) - }) - .log_err() - .flatten() - else { - return Task::ready(anyhow::Ok(())); - }; - - cx.spawn(async move |cx| { - let active_buffer = open_buffer_task.await?; - let snapshot = active_buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?; - - let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( - active_buffer, - &snapshot, - Default::default(), - Default::default(), - &project, - cx, - ) - .await? - else { - return anyhow::Ok(()); - }; - - let Some(prediction) = this - .update(cx, |this, cx| { - this.request_prediction(&project, &jump_buffer, jump_position, cx) - })? - .await? 
- else { - return anyhow::Ok(()); - }; - - this.update(cx, |this, cx| { - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - zeta_project.current_prediction.get_or_insert_with(|| { - cx.notify(); - CurrentEditPrediction { - requested_by: PredictionRequestedBy::DiagnosticsUpdate, - prediction, - } - }); - } - })?; - - anyhow::Ok(()) - }) - }); - } - - #[cfg(not(test))] - pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); - #[cfg(test)] - pub const THROTTLE_TIMEOUT: Duration = Duration::ZERO; - - fn queue_prediction_refresh( - &mut self, - project: Entity, - throttle_entity: EntityId, - cx: &mut Context, - do_refresh: impl FnOnce(WeakEntity, &mut AsyncApp) -> Task> + 'static, - ) { - let zeta_project = self.get_or_init_zeta_project(&project, cx); - let pending_prediction_id = zeta_project.next_pending_prediction_id; - zeta_project.next_pending_prediction_id += 1; - let last_request = zeta_project.last_prediction_refresh; - - // TODO report cancelled requests like in zeta1 - let task = cx.spawn(async move |this, cx| { - if let Some((last_entity, last_timestamp)) = last_request - && throttle_entity == last_entity - && let Some(timeout) = - (last_timestamp + Self::THROTTLE_TIMEOUT).checked_duration_since(Instant::now()) - { - cx.background_executor().timer(timeout).await; - } - - do_refresh(this.clone(), cx).await.log_err(); - - this.update(cx, |this, cx| { - let zeta_project = this.get_or_init_zeta_project(&project, cx); - - if zeta_project.pending_predictions[0].id == pending_prediction_id { - zeta_project.pending_predictions.remove(0); - } else { - zeta_project.pending_predictions.clear(); - } - - cx.notify(); - }) - .ok(); - }); - - if zeta_project.pending_predictions.len() <= 1 { - zeta_project.pending_predictions.push(PendingPrediction { - id: pending_prediction_id, - _task: task, - }); - } else if zeta_project.pending_predictions.len() == 2 { - zeta_project.pending_predictions.pop(); - 
zeta_project.pending_predictions.push(PendingPrediction { - id: pending_prediction_id, - _task: task, - }); - } - } - - pub fn request_prediction( - &mut self, - project: &Entity, - active_buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - match self.edit_prediction_model { - ZetaEditPredictionModel::ZedCloud => { - self.request_prediction_with_zed_cloud(project, active_buffer, position, cx) - } - ZetaEditPredictionModel::Sweep => { - self.request_prediction_with_sweep(project, active_buffer, position, true, cx) - } - } - } - - fn request_prediction_with_sweep( - &mut self, - project: &Entity, - active_buffer: &Entity, - position: language::Anchor, - allow_jump: bool, - cx: &mut Context, - ) -> Task>> { - let snapshot = active_buffer.read(cx).snapshot(); - let debug_info = self.sweep_ai_debug_info.clone(); - let Some(api_token) = self.sweep_api_token.clone() else { - return Task::ready(Ok(None)); - }; - let full_path: Arc = snapshot - .file() - .map(|file| file.full_path(cx)) - .unwrap_or_else(|| "untitled".into()) - .into(); - - let project_file = project::File::from_dyn(snapshot.file()); - let repo_name = project_file - .map(|file| file.worktree.read(cx).root_name_str()) - .unwrap_or("untitled") - .into(); - let offset = position.to_offset(&snapshot); - - let project_state = self.get_or_init_zeta_project(project, cx); - let events = project_state.events.clone(); - let has_events = !events.is_empty(); - let recent_buffers = project_state.recent_paths.iter().cloned(); - let http_client = cx.http_client(); - - let recent_buffer_snapshots = recent_buffers - .filter_map(|project_path| { - let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; - if active_buffer == &buffer { - None - } else { - Some(buffer.read(cx).snapshot()) - } - }) - .take(3) - .collect::>(); - - const DIAGNOSTIC_LINES_RANGE: u32 = 20; - - let cursor_point = position.to_point(&snapshot); - let diagnostic_search_start = 
cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); - let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; - let diagnostic_search_range = - Point::new(diagnostic_search_start, 0)..Point::new(diagnostic_search_end, 0); - - let result = cx.background_spawn({ - let snapshot = snapshot.clone(); - let diagnostic_search_range = diagnostic_search_range.clone(); - async move { - let text = snapshot.text(); - - let mut recent_changes = String::new(); - for event in events { - sweep_ai::write_event(event, &mut recent_changes).unwrap(); - } - - let mut file_chunks = recent_buffer_snapshots - .into_iter() - .map(|snapshot| { - let end_point = Point::new(30, 0).min(snapshot.max_point()); - sweep_ai::FileChunk { - content: snapshot.text_for_range(Point::zero()..end_point).collect(), - file_path: snapshot - .file() - .map(|f| f.path().as_unix_str()) - .unwrap_or("untitled") - .to_string(), - start_line: 0, - end_line: end_point.row as usize, - timestamp: snapshot.file().and_then(|file| { - Some( - file.disk_state() - .mtime()? - .to_seconds_and_nanos_for_persistence()? 
- .0, - ) - }), - } - }) - .collect::>(); - - let diagnostic_entries = - snapshot.diagnostics_in_range(diagnostic_search_range, false); - let mut diagnostic_content = String::new(); - let mut diagnostic_count = 0; - - for entry in diagnostic_entries { - let start_point: Point = entry.range.start; - - let severity = match entry.diagnostic.severity { - DiagnosticSeverity::ERROR => "error", - DiagnosticSeverity::WARNING => "warning", - DiagnosticSeverity::INFORMATION => "info", - DiagnosticSeverity::HINT => "hint", - _ => continue, - }; - - diagnostic_count += 1; - - writeln!( - &mut diagnostic_content, - "{} at line {}: {}", - severity, - start_point.row + 1, - entry.diagnostic.message - )?; - } - - if !diagnostic_content.is_empty() { - file_chunks.push(sweep_ai::FileChunk { - file_path: format!("Diagnostics for {}", full_path.display()), - start_line: 0, - end_line: diagnostic_count, - content: diagnostic_content, - timestamp: None, - }); - } - - let request_body = sweep_ai::AutocompleteRequest { - debug_info, - repo_name, - file_path: full_path.clone(), - file_contents: text.clone(), - original_file_contents: text, - cursor_position: offset, - recent_changes: recent_changes.clone(), - changes_above_cursor: true, - multiple_suggestions: false, - branch: None, - file_chunks, - retrieval_chunks: vec![], - recent_user_actions: vec![], - // TODO - privacy_mode_enabled: false, - }; - - let mut buf: Vec = Vec::new(); - let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); - serde_json::to_writer(writer, &request_body)?; - let body: AsyncBody = buf.into(); - - const SWEEP_API_URL: &str = - "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; - - let request = http_client::Request::builder() - .uri(SWEEP_API_URL) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", api_token)) - .header("Connection", "keep-alive") - .header("Content-Encoding", "br") - .method(Method::POST) - .body(body)?; - - let mut 
response = http_client.send(request).await?; - - let mut body: Vec = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - - if !response.status().is_success() { - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - String::from_utf8_lossy(&body), - ); - }; - - let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; - - let old_text = snapshot - .text_for_range(response.start_index..response.end_index) - .collect::(); - let edits = language::text_diff(&old_text, &response.completion) - .into_iter() - .map(|(range, text)| { - ( - snapshot.anchor_after(response.start_index + range.start) - ..snapshot.anchor_before(response.start_index + range.end), - text, - ) - }) - .collect::>(); - - anyhow::Ok((response.autocomplete_id, edits, snapshot)) - } - }); - - let buffer = active_buffer.clone(); - let project = project.clone(); - let active_buffer = active_buffer.clone(); - - cx.spawn(async move |this, cx| { - let (id, edits, old_snapshot) = result.await?; - - if edits.is_empty() { - if has_events - && allow_jump - && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( - active_buffer, - &snapshot, - diagnostic_search_range, - cursor_point, - &project, - cx, - ) - .await? - { - return this - .update(cx, |this, cx| { - this.request_prediction_with_sweep( - &project, - &jump_buffer, - jump_position, - false, - cx, - ) - })? - .await; - } - - return anyhow::Ok(None); - } - - let Some((edits, new_snapshot, preview_task)) = - buffer.read_with(cx, |buffer, cx| { - let new_snapshot = buffer.snapshot(); - - let edits: Arc<[(Range, Arc)]> = - edit_prediction::interpolate_edits(&old_snapshot, &new_snapshot, &edits)? - .into(); - let preview_task = buffer.preview_edits(edits.clone(), cx); - - Some((edits, new_snapshot, preview_task)) - })? 
- else { - return anyhow::Ok(None); - }; - - let prediction = EditPrediction { - id: EditPredictionId(id.into()), - edits, - snapshot: new_snapshot, - edit_preview: preview_task.await, - buffer, - }; - - anyhow::Ok(Some(prediction)) - }) - } - - async fn next_diagnostic_location( - active_buffer: Entity, - active_buffer_snapshot: &BufferSnapshot, - active_buffer_diagnostic_search_range: Range, - active_buffer_cursor_point: Point, - project: &Entity, - cx: &mut AsyncApp, - ) -> Result, language::Anchor)>> { - // find the closest diagnostic to the cursor that wasn't close enough to be included in the last request - let mut jump_location = active_buffer_snapshot - .diagnostic_groups(None) - .into_iter() - .filter_map(|(_, group)| { - let range = &group.entries[group.primary_ix] - .range - .to_point(&active_buffer_snapshot); - if range.overlaps(&active_buffer_diagnostic_search_range) { - None - } else { - Some(range.start) - } - }) - .min_by_key(|probe| probe.row.abs_diff(active_buffer_cursor_point.row)) - .map(|position| { - ( - active_buffer.clone(), - active_buffer_snapshot.anchor_before(position), - ) - }); - - if jump_location.is_none() { - let active_buffer_path = active_buffer.read_with(cx, |buffer, cx| { - let file = buffer.file()?; - - Some(ProjectPath { - worktree_id: file.worktree_id(cx), - path: file.path().clone(), - }) - })?; - - let buffer_task = project.update(cx, |project, cx| { - let (path, _, _) = project - .diagnostic_summaries(false, cx) - .filter(|(path, _, _)| Some(path) != active_buffer_path.as_ref()) - .max_by_key(|(path, _, _)| { - // find the buffer with errors that shares most parent directories - path.path - .components() - .zip( - active_buffer_path - .as_ref() - .map(|p| p.path.components()) - .unwrap_or_default(), - ) - .take_while(|(a, b)| a == b) - .count() - })?; - - Some(project.open_buffer(path, cx)) - })?; - - if let Some(buffer_task) = buffer_task { - let closest_buffer = buffer_task.await?; - - jump_location = closest_buffer - 
.read_with(cx, |buffer, _cx| { - buffer - .buffer_diagnostics(None) - .into_iter() - .min_by_key(|entry| entry.diagnostic.severity) - .map(|entry| entry.range.start) - })? - .map(|position| (closest_buffer, position)); - } - } - - anyhow::Ok(jump_location) - } - - fn request_prediction_with_zed_cloud( - &mut self, - project: &Entity, - active_buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - let project_state = self.projects.get(&project.entity_id()); - - let index_state = project_state.and_then(|state| { - state - .syntax_index - .as_ref() - .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) - }); - let options = self.options.clone(); - let active_snapshot = active_buffer.read(cx).snapshot(); - let Some(excerpt_path) = active_snapshot - .file() - .map(|path| -> Arc { path.full_path(cx).into() }) - else { - return Task::ready(Err(anyhow!("No file path for excerpt"))); - }; - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let app_version = AppVersion::global(cx); - let worktree_snapshots = project - .read(cx) - .worktrees(cx) - .map(|worktree| worktree.read(cx).snapshot()) - .collect::>(); - let debug_tx = self.debug_tx.clone(); - - let events = project_state - .map(|state| { - state - .events - .iter() - .filter_map(|event| event.to_request_event(cx)) - .collect::>() - }) - .unwrap_or_default(); - - let diagnostics = active_snapshot.diagnostic_sets().clone(); - - let parent_abs_path = - project::File::from_dyn(active_buffer.read(cx).file()).and_then(|f| { - let mut path = f.worktree.read(cx).absolutize(&f.path); - if path.pop() { Some(path) } else { None } - }); - - // TODO data collection - let can_collect_data = cx.is_staff(); - - let empty_context_files = HashMap::default(); - let context_files = project_state - .and_then(|project_state| project_state.context.as_ref()) - .unwrap_or(&empty_context_files); - - #[cfg(feature = "eval-support")] - let parsed_fut = 
futures::future::join_all( - context_files - .keys() - .map(|buffer| buffer.read(cx).parsing_idle()), - ); - - let mut included_files = context_files - .iter() - .filter_map(|(buffer_entity, ranges)| { - let buffer = buffer_entity.read(cx); - Some(( - buffer_entity.clone(), - buffer.snapshot(), - buffer.file()?.full_path(cx).into(), - ranges.clone(), - )) - }) - .collect::>(); - - included_files.sort_by(|(_, _, path_a, ranges_a), (_, _, path_b, ranges_b)| { - (path_a, ranges_a.len()).cmp(&(path_b, ranges_b.len())) - }); - - #[cfg(feature = "eval-support")] - let eval_cache = self.eval_cache.clone(); - - let request_task = cx.background_spawn({ - let active_buffer = active_buffer.clone(); - async move { - #[cfg(feature = "eval-support")] - parsed_fut.await; - - let index_state = if let Some(index_state) = index_state { - Some(index_state.lock_owned().await) - } else { - None - }; - - let cursor_offset = position.to_offset(&active_snapshot); - let cursor_point = cursor_offset.to_point(&active_snapshot); - - let before_retrieval = chrono::Utc::now(); - - let (diagnostic_groups, diagnostic_groups_truncated) = - Self::gather_nearby_diagnostics( - cursor_offset, - &diagnostics, - &active_snapshot, - options.max_diagnostic_bytes, - ); - - let cloud_request = match options.context { - ContextMode::Agentic(context_options) => { - let Some(excerpt) = EditPredictionExcerpt::select_from_buffer( - cursor_point, - &active_snapshot, - &context_options.excerpt, - index_state.as_deref(), - ) else { - return Ok((None, None)); - }; - - let excerpt_anchor_range = active_snapshot.anchor_after(excerpt.range.start) - ..active_snapshot.anchor_before(excerpt.range.end); - - if let Some(buffer_ix) = - included_files.iter().position(|(_, snapshot, _, _)| { - snapshot.remote_id() == active_snapshot.remote_id() - }) - { - let (_, buffer, _, ranges) = &mut included_files[buffer_ix]; - ranges.push(excerpt_anchor_range); - retrieval_search::merge_anchor_ranges(ranges, buffer); - let last_ix = 
included_files.len() - 1; - included_files.swap(buffer_ix, last_ix); - } else { - included_files.push(( - active_buffer.clone(), - active_snapshot.clone(), - excerpt_path.clone(), - vec![excerpt_anchor_range], - )); - } - - let included_files = included_files - .iter() - .map(|(_, snapshot, path, ranges)| { - let ranges = ranges - .iter() - .map(|range| { - let point_range = range.to_point(&snapshot); - Line(point_range.start.row)..Line(point_range.end.row) - }) - .collect::>(); - let excerpts = assemble_excerpts(&snapshot, ranges); - predict_edits_v3::IncludedFile { - path: path.clone(), - max_row: Line(snapshot.max_point().row), - excerpts, - } - }) - .collect::>(); - - predict_edits_v3::PredictEditsRequest { - excerpt_path, - excerpt: String::new(), - excerpt_line_range: Line(0)..Line(0), - excerpt_range: 0..0, - cursor_point: predict_edits_v3::Point { - line: predict_edits_v3::Line(cursor_point.row), - column: cursor_point.column, - }, - included_files, - referenced_declarations: vec![], - events, - can_collect_data, - diagnostic_groups, - diagnostic_groups_truncated, - debug_info: debug_tx.is_some(), - prompt_max_bytes: Some(options.max_prompt_bytes), - prompt_format: options.prompt_format, - // TODO [zeta2] - signatures: vec![], - excerpt_parent: None, - git_info: None, - } - } - ContextMode::Syntax(context_options) => { - let Some(context) = EditPredictionContext::gather_context( - cursor_point, - &active_snapshot, - parent_abs_path.as_deref(), - &context_options, - index_state.as_deref(), - ) else { - return Ok((None, None)); - }; - - make_syntax_context_cloud_request( - excerpt_path, - context, - events, - can_collect_data, - diagnostic_groups, - diagnostic_groups_truncated, - None, - debug_tx.is_some(), - &worktree_snapshots, - index_state.as_deref(), - Some(options.max_prompt_bytes), - options.prompt_format, - ) - } - }; - - let prompt_result = cloud_zeta2_prompt::build_prompt(&cloud_request); - - let retrieval_time = chrono::Utc::now() - 
before_retrieval; - - let debug_response_tx = if let Some(debug_tx) = &debug_tx { - let (response_tx, response_rx) = oneshot::channel(); - - debug_tx - .unbounded_send(ZetaDebugInfo::EditPredictionRequested( - ZetaEditPredictionDebugInfo { - request: cloud_request.clone(), - retrieval_time, - buffer: active_buffer.downgrade(), - local_prompt: match prompt_result.as_ref() { - Ok((prompt, _)) => Ok(prompt.clone()), - Err(err) => Err(err.to_string()), - }, - position, - response_rx, - }, - )) - .ok(); - Some(response_tx) - } else { - None - }; - - if cfg!(debug_assertions) && env::var("ZED_ZETA2_SKIP_REQUEST").is_ok() { - if let Some(debug_response_tx) = debug_response_tx { - debug_response_tx - .send((Err("Request skipped".to_string()), TimeDelta::zero())) - .ok(); - } - anyhow::bail!("Skipping request because ZED_ZETA2_SKIP_REQUEST is set") - } - - let (prompt, _) = prompt_result?; - let generation_params = - cloud_zeta2_prompt::generation_params(cloud_request.prompt_format); - let request = open_ai::Request { - model: EDIT_PREDICTIONS_MODEL_ID.clone(), - messages: vec![open_ai::RequestMessage::User { - content: open_ai::MessageContent::Plain(prompt), - }], - stream: false, - max_completion_tokens: None, - stop: generation_params.stop.unwrap_or_default(), - temperature: generation_params.temperature.unwrap_or(0.7), - tool_choice: None, - parallel_tool_calls: None, - tools: vec![], - prompt_cache_key: None, - reasoning_effort: None, - }; - - log::trace!("Sending edit prediction request"); - - let before_request = chrono::Utc::now(); - let response = Self::send_raw_llm_request( - request, - client, - llm_token, - app_version, - #[cfg(feature = "eval-support")] - eval_cache, - #[cfg(feature = "eval-support")] - EvalCacheEntryKind::Prediction, - ) - .await; - let request_time = chrono::Utc::now() - before_request; - - log::trace!("Got edit prediction response"); - - if let Some(debug_response_tx) = debug_response_tx { - debug_response_tx - .send(( - response - .as_ref() 
- .map_err(|err| err.to_string()) - .map(|response| response.0.clone()), - request_time, - )) - .ok(); - } - - let (res, usage) = response?; - let request_id = EditPredictionId(res.id.clone().into()); - let Some(mut output_text) = text_from_response(res) else { - return Ok((None, usage)); - }; - - if output_text.contains(CURSOR_MARKER) { - log::trace!("Stripping out {CURSOR_MARKER} from response"); - output_text = output_text.replace(CURSOR_MARKER, ""); - } - - let get_buffer_from_context = |path: &Path| { - included_files - .iter() - .find_map(|(_, buffer, probe_path, ranges)| { - if probe_path.as_ref() == path { - Some((buffer, ranges.as_slice())) - } else { - None - } - }) - }; - - let (edited_buffer_snapshot, edits) = match options.prompt_format { - PromptFormat::NumLinesUniDiff => { - // TODO: Implement parsing of multi-file diffs - crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? - } - PromptFormat::Minimal - | PromptFormat::MinimalQwen - | PromptFormat::SeedCoder1120 => { - if output_text.contains("--- a/\n+++ b/\nNo edits") { - let edits = vec![]; - (&active_snapshot, edits) - } else { - crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? - } - } - PromptFormat::OldTextNewText => { - crate::xml_edits::parse_xml_edits(&output_text, get_buffer_from_context) - .await? - } - _ => { - bail!("unsupported prompt format {}", options.prompt_format) - } - }; - - let edited_buffer = included_files - .iter() - .find_map(|(buffer, snapshot, _, _)| { - if snapshot.remote_id() == edited_buffer_snapshot.remote_id() { - Some(buffer.clone()) - } else { - None - } - }) - .context("Failed to find buffer in included_buffers")?; - - anyhow::Ok(( - Some(( - request_id, - edited_buffer, - edited_buffer_snapshot.clone(), - edits, - )), - usage, - )) - } - }); - - cx.spawn({ - async move |this, cx| { - let Some((id, edited_buffer, edited_buffer_snapshot, edits)) = - Self::handle_api_response(&this, request_task.await, cx)? 
- else { - return Ok(None); - }; - - // TODO telemetry: duration, etc - Ok( - EditPrediction::new(id, &edited_buffer, &edited_buffer_snapshot, edits, cx) - .await, - ) - } - }) - } - - async fn send_raw_llm_request( - request: open_ai::Request, - client: Arc, - llm_token: LlmApiToken, - app_version: Version, - #[cfg(feature = "eval-support")] eval_cache: Option>, - #[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind, - ) -> Result<(open_ai::Response, Option)> { - let url = if let Some(predict_edits_url) = PREDICT_EDITS_URL.as_ref() { - http_client::Url::parse(&predict_edits_url)? - } else { - client - .http_client() - .build_zed_llm_url("/predict_edits/raw", &[])? - }; - - #[cfg(feature = "eval-support")] - let cache_key = if let Some(cache) = eval_cache { - use collections::FxHasher; - use std::hash::{Hash, Hasher}; - - let mut hasher = FxHasher::default(); - url.hash(&mut hasher); - let request_str = serde_json::to_string_pretty(&request)?; - request_str.hash(&mut hasher); - let hash = hasher.finish(); - - let key = (eval_cache_kind, hash); - if let Some(response_str) = cache.read(key) { - return Ok((serde_json::from_str(&response_str)?, None)); - } - - Some((cache, request_str, key)) - } else { - None - }; - - let (response, usage) = Self::send_api_request( - |builder| { - let req = builder - .uri(url.as_ref()) - .body(serde_json::to_string(&request)?.into()); - Ok(req?) 
- }, - client, - llm_token, - app_version, - ) - .await?; - - #[cfg(feature = "eval-support")] - if let Some((cache, request, key)) = cache_key { - cache.write(key, &request, &serde_json::to_string_pretty(&response)?); - } - - Ok((response, usage)) - } - - fn handle_api_response( - this: &WeakEntity, - response: Result<(T, Option)>, - cx: &mut gpui::AsyncApp, - ) -> Result { - match response { - Ok((data, usage)) => { - if let Some(usage) = usage { - this.update(cx, |this, cx| { - this.user_store.update(cx, |user_store, cx| { - user_store.update_edit_prediction_usage(usage, cx); - }); - }) - .ok(); - } - Ok(data) - } - Err(err) => { - if err.is::() { - cx.update(|cx| { - this.update(cx, |this, _cx| { - this.update_required = true; - }) - .ok(); - - let error_message: SharedString = err.to_string().into(); - show_app_notification( - NotificationId::unique::(), - cx, - move |cx| { - cx.new(|cx| { - ErrorMessagePrompt::new(error_message.clone(), cx) - .with_link_button("Update Zed", "https://zed.dev/releases") - }) - }, - ); - }) - .ok(); - } - Err(err) - } - } - } - - async fn send_api_request( - build: impl Fn(http_client::http::request::Builder) -> Result>, - client: Arc, - llm_token: LlmApiToken, - app_version: Version, - ) -> Result<(Res, Option)> - where - Res: DeserializeOwned, - { - let http_client = client.http_client(); - let mut token = llm_token.acquire(&client).await?; - let mut did_retry = false; - - loop { - let request_builder = http_client::Request::builder().method(Method::POST); - - let request = build( - request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()), - )?; - - let mut response = http_client.send(request).await?; - - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - { - anyhow::ensure!( - 
app_version >= minimum_required_version, - ZedUpdateRequiredError { - minimum_version: minimum_required_version - } - ); - } - - if response.status().is_success() { - let usage = EditPredictionUsage::from_headers(response.headers()).ok(); - - let mut body = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - return Ok((serde_json::from_slice(&body)?, usage)); - } else if !did_retry - && response - .headers() - .get(EXPIRED_LLM_TOKEN_HEADER_NAME) - .is_some() - { - did_retry = true; - token = llm_token.refresh(&client).await?; - } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - body - ); - } - } - } - - pub const CONTEXT_RETRIEVAL_IDLE_DURATION: Duration = Duration::from_secs(10); - pub const CONTEXT_RETRIEVAL_DEBOUNCE_DURATION: Duration = Duration::from_secs(3); - - // Refresh the related excerpts when the user just beguns editing after - // an idle period, and after they pause editing. - fn refresh_context_if_needed( - &mut self, - project: &Entity, - buffer: &Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) { - if !matches!(&self.options().context, ContextMode::Agentic { .. 
}) { - return; - } - - let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - - let now = Instant::now(); - let was_idle = zeta_project - .refresh_context_timestamp - .map_or(true, |timestamp| { - now - timestamp > Self::CONTEXT_RETRIEVAL_IDLE_DURATION - }); - zeta_project.refresh_context_timestamp = Some(now); - zeta_project.refresh_context_debounce_task = Some(cx.spawn({ - let buffer = buffer.clone(); - let project = project.clone(); - async move |this, cx| { - if was_idle { - log::debug!("refetching edit prediction context after idle"); - } else { - cx.background_executor() - .timer(Self::CONTEXT_RETRIEVAL_DEBOUNCE_DURATION) - .await; - log::debug!("refetching edit prediction context after pause"); - } - this.update(cx, |this, cx| { - let task = this.refresh_context(project.clone(), buffer, cursor_position, cx); - - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - zeta_project.refresh_context_task = Some(task.log_err()); - }; - }) - .ok() - } - })); - } - - // Refresh the related excerpts asynchronously. Ensure the task runs to completion, - // and avoid spawning more than one concurrent task. 
- pub fn refresh_context( - &mut self, - project: Entity, - buffer: Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) -> Task> { - let Some(zeta_project) = self.projects.get(&project.entity_id()) else { - return Task::ready(anyhow::Ok(())); - }; - - let ContextMode::Agentic(options) = &self.options().context else { - return Task::ready(anyhow::Ok(())); - }; - - let snapshot = buffer.read(cx).snapshot(); - let cursor_point = cursor_position.to_point(&snapshot); - let Some(cursor_excerpt) = EditPredictionExcerpt::select_from_buffer( - cursor_point, - &snapshot, - &options.excerpt, - None, - ) else { - return Task::ready(Ok(())); - }; - - let app_version = AppVersion::global(cx); - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let debug_tx = self.debug_tx.clone(); - let current_file_path: Arc = snapshot - .file() - .map(|f| f.full_path(cx).into()) - .unwrap_or_else(|| Path::new("untitled").into()); - - let prompt = match cloud_zeta2_prompt::retrieval_prompt::build_prompt( - predict_edits_v3::PlanContextRetrievalRequest { - excerpt: cursor_excerpt.text(&snapshot).body, - excerpt_path: current_file_path, - excerpt_line_range: cursor_excerpt.line_range, - cursor_file_max_row: Line(snapshot.max_point().row), - events: zeta_project - .events - .iter() - .filter_map(|ev| ev.to_request_event(cx)) - .collect(), - }, - ) { - Ok(prompt) => prompt, - Err(err) => { - return Task::ready(Err(err)); - } - }; - - if let Some(debug_tx) = &debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::ContextRetrievalStarted( - ZetaContextRetrievalStartedDebugInfo { - project: project.clone(), - timestamp: Instant::now(), - search_prompt: prompt.clone(), - }, - )) - .ok(); - } - - pub static TOOL_SCHEMA: LazyLock<(serde_json::Value, String)> = LazyLock::new(|| { - let schema = language_model::tool_schema::root_schema_for::( - language_model::LanguageModelToolSchemaFormat::JsonSchemaSubset, - ); - - let description = schema - 
.get("description") - .and_then(|description| description.as_str()) - .unwrap() - .to_string(); - - (schema.into(), description) - }); - - let (tool_schema, tool_description) = TOOL_SCHEMA.clone(); - - let request = open_ai::Request { - model: CONTEXT_RETRIEVAL_MODEL_ID.clone(), - messages: vec![open_ai::RequestMessage::User { - content: open_ai::MessageContent::Plain(prompt), - }], - stream: false, - max_completion_tokens: None, - stop: Default::default(), - temperature: 0.7, - tool_choice: None, - parallel_tool_calls: None, - tools: vec![open_ai::ToolDefinition::Function { - function: FunctionDefinition { - name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME.to_string(), - description: Some(tool_description), - parameters: Some(tool_schema), - }, - }], - prompt_cache_key: None, - reasoning_effort: None, - }; - - #[cfg(feature = "eval-support")] - let eval_cache = self.eval_cache.clone(); - - cx.spawn(async move |this, cx| { - log::trace!("Sending search planning request"); - let response = Self::send_raw_llm_request( - request, - client, - llm_token, - app_version, - #[cfg(feature = "eval-support")] - eval_cache.clone(), - #[cfg(feature = "eval-support")] - EvalCacheEntryKind::Context, - ) - .await; - let mut response = Self::handle_api_response(&this, response, cx)?; - log::trace!("Got search planning response"); - - let choice = response - .choices - .pop() - .context("No choices in retrieval response")?; - let open_ai::RequestMessage::Assistant { - content: _, - tool_calls, - } = choice.message - else { - anyhow::bail!("Retrieval response didn't include an assistant message"); - }; - - let mut queries: Vec = Vec::new(); - for tool_call in tool_calls { - let open_ai::ToolCallContent::Function { function } = tool_call.content; - if function.name != cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME { - log::warn!( - "Context retrieval response tried to call an unknown tool: {}", - function.name - ); - - continue; - } - - let input: SearchToolInput = 
serde_json::from_str(&function.arguments) - .with_context(|| format!("invalid search json {}", &function.arguments))?; - queries.extend(input.queries); - } - - if let Some(debug_tx) = &debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::SearchQueriesGenerated( - ZetaSearchQueryDebugInfo { - project: project.clone(), - timestamp: Instant::now(), - search_queries: queries.clone(), - }, - )) - .ok(); - } - - log::trace!("Running retrieval search: {queries:#?}"); - - let related_excerpts_result = retrieval_search::run_retrieval_searches( - queries, - project.clone(), - #[cfg(feature = "eval-support")] - eval_cache, - cx, - ) - .await; - - log::trace!("Search queries executed"); - - if let Some(debug_tx) = &debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::SearchQueriesExecuted( - ZetaContextRetrievalDebugInfo { - project: project.clone(), - timestamp: Instant::now(), - }, - )) - .ok(); - } - - this.update(cx, |this, _cx| { - let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) else { - return Ok(()); - }; - zeta_project.refresh_context_task.take(); - if let Some(debug_tx) = &this.debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::ContextRetrievalFinished( - ZetaContextRetrievalDebugInfo { - project, - timestamp: Instant::now(), - }, - )) - .ok(); - } - match related_excerpts_result { - Ok(excerpts) => { - zeta_project.context = Some(excerpts); - Ok(()) - } - Err(error) => Err(error), - } - })? 
- }) - } - - pub fn set_context( - &mut self, - project: Entity, - context: HashMap, Vec>>, - ) { - if let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) { - zeta_project.context = Some(context); - } - } - - fn gather_nearby_diagnostics( - cursor_offset: usize, - diagnostic_sets: &[(LanguageServerId, DiagnosticSet)], - snapshot: &BufferSnapshot, - max_diagnostics_bytes: usize, - ) -> (Vec, bool) { - // TODO: Could make this more efficient - let mut diagnostic_groups = Vec::new(); - for (language_server_id, diagnostics) in diagnostic_sets { - let mut groups = Vec::new(); - diagnostics.groups(*language_server_id, &mut groups, &snapshot); - diagnostic_groups.extend( - groups - .into_iter() - .map(|(_, group)| group.resolve::(&snapshot)), - ); - } - - // sort by proximity to cursor - diagnostic_groups.sort_by_key(|group| { - let range = &group.entries[group.primary_ix].range; - if range.start >= cursor_offset { - range.start - cursor_offset - } else if cursor_offset >= range.end { - cursor_offset - range.end - } else { - (cursor_offset - range.start).min(range.end - cursor_offset) - } - }); - - let mut results = Vec::new(); - let mut diagnostic_groups_truncated = false; - let mut diagnostics_byte_count = 0; - for group in diagnostic_groups { - let raw_value = serde_json::value::to_raw_value(&group).unwrap(); - diagnostics_byte_count += raw_value.get().len(); - if diagnostics_byte_count > max_diagnostics_bytes { - diagnostic_groups_truncated = true; - break; - } - results.push(predict_edits_v3::DiagnosticGroup(raw_value)); - } - - (results, diagnostic_groups_truncated) - } - - // TODO: Dedupe with similar code in request_prediction? 
- pub fn cloud_request_for_zeta_cli( - &mut self, - project: &Entity, - buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task> { - let project_state = self.projects.get(&project.entity_id()); - - let index_state = project_state.and_then(|state| { - state - .syntax_index - .as_ref() - .map(|index| index.read_with(cx, |index, _cx| index.state().clone())) - }); - let options = self.options.clone(); - let snapshot = buffer.read(cx).snapshot(); - let Some(excerpt_path) = snapshot.file().map(|path| path.full_path(cx)) else { - return Task::ready(Err(anyhow!("No file path for excerpt"))); - }; - let worktree_snapshots = project - .read(cx) - .worktrees(cx) - .map(|worktree| worktree.read(cx).snapshot()) - .collect::>(); - - let parent_abs_path = project::File::from_dyn(buffer.read(cx).file()).and_then(|f| { - let mut path = f.worktree.read(cx).absolutize(&f.path); - if path.pop() { Some(path) } else { None } - }); - - cx.background_spawn(async move { - let index_state = if let Some(index_state) = index_state { - Some(index_state.lock_owned().await) - } else { - None - }; - - let cursor_point = position.to_point(&snapshot); - - let debug_info = true; - EditPredictionContext::gather_context( - cursor_point, - &snapshot, - parent_abs_path.as_deref(), - match &options.context { - ContextMode::Agentic(_) => { - // TODO - panic!("Llm mode not supported in zeta cli yet"); - } - ContextMode::Syntax(edit_prediction_context_options) => { - edit_prediction_context_options - } - }, - index_state.as_deref(), - ) - .context("Failed to select excerpt") - .map(|context| { - make_syntax_context_cloud_request( - excerpt_path.into(), - context, - // TODO pass everything - Vec::new(), - false, - Vec::new(), - false, - None, - debug_info, - &worktree_snapshots, - index_state.as_deref(), - Some(options.max_prompt_bytes), - options.prompt_format, - ) - }) - }) - } - - pub fn wait_for_initial_indexing( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> Task> { - 
let zeta_project = self.get_or_init_zeta_project(project, cx); - if let Some(syntax_index) = &zeta_project.syntax_index { - syntax_index.read(cx).wait_for_initial_file_indexing(cx) - } else { - Task::ready(Ok(())) - } - } -} - -pub fn text_from_response(mut res: open_ai::Response) -> Option { - let choice = res.choices.pop()?; - let output_text = match choice.message { - open_ai::RequestMessage::Assistant { - content: Some(open_ai::MessageContent::Plain(content)), - .. - } => content, - open_ai::RequestMessage::Assistant { - content: Some(open_ai::MessageContent::Multipart(mut content)), - .. - } => { - if content.is_empty() { - log::error!("No output from Baseten completion response"); - return None; - } - - match content.remove(0) { - open_ai::MessagePart::Text { text } => text, - open_ai::MessagePart::Image { .. } => { - log::error!("Expected text, got an image"); - return None; - } - } - } - _ => { - log::error!("Invalid response message: {:?}", choice.message); - return None; - } - }; - Some(output_text) -} - -#[derive(Error, Debug)] -#[error( - "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
-)] -pub struct ZedUpdateRequiredError { - minimum_version: Version, -} - -fn make_syntax_context_cloud_request( - excerpt_path: Arc, - context: EditPredictionContext, - events: Vec, - can_collect_data: bool, - diagnostic_groups: Vec, - diagnostic_groups_truncated: bool, - git_info: Option, - debug_info: bool, - worktrees: &Vec, - index_state: Option<&SyntaxIndexState>, - prompt_max_bytes: Option, - prompt_format: PromptFormat, -) -> predict_edits_v3::PredictEditsRequest { - let mut signatures = Vec::new(); - let mut declaration_to_signature_index = HashMap::default(); - let mut referenced_declarations = Vec::new(); - - for snippet in context.declarations { - let project_entry_id = snippet.declaration.project_entry_id(); - let Some(path) = worktrees.iter().find_map(|worktree| { - worktree.entry_for_id(project_entry_id).map(|entry| { - let mut full_path = RelPathBuf::new(); - full_path.push(worktree.root_name()); - full_path.push(&entry.path); - full_path - }) - }) else { - continue; - }; - - let parent_index = index_state.and_then(|index_state| { - snippet.declaration.parent().and_then(|parent| { - add_signature( - parent, - &mut declaration_to_signature_index, - &mut signatures, - index_state, - ) - }) - }); - - let (text, text_is_truncated) = snippet.declaration.item_text(); - referenced_declarations.push(predict_edits_v3::ReferencedDeclaration { - path: path.as_std_path().into(), - text: text.into(), - range: snippet.declaration.item_line_range(), - text_is_truncated, - signature_range: snippet.declaration.signature_range_in_item_text(), - parent_index, - signature_score: snippet.score(DeclarationStyle::Signature), - declaration_score: snippet.score(DeclarationStyle::Declaration), - score_components: snippet.components, - }); - } - - let excerpt_parent = index_state.and_then(|index_state| { - context - .excerpt - .parent_declarations - .last() - .and_then(|(parent, _)| { - add_signature( - *parent, - &mut declaration_to_signature_index, - &mut signatures, - 
index_state, - ) - }) - }); - - predict_edits_v3::PredictEditsRequest { - excerpt_path, - excerpt: context.excerpt_text.body, - excerpt_line_range: context.excerpt.line_range, - excerpt_range: context.excerpt.range, - cursor_point: predict_edits_v3::Point { - line: predict_edits_v3::Line(context.cursor_point.row), - column: context.cursor_point.column, - }, - referenced_declarations, - included_files: vec![], - signatures, - excerpt_parent, - events, - can_collect_data, - diagnostic_groups, - diagnostic_groups_truncated, - git_info, - debug_info, - prompt_max_bytes, - prompt_format, - } -} - -fn add_signature( - declaration_id: DeclarationId, - declaration_to_signature_index: &mut HashMap, - signatures: &mut Vec, - index: &SyntaxIndexState, -) -> Option { - if let Some(signature_index) = declaration_to_signature_index.get(&declaration_id) { - return Some(*signature_index); - } - let Some(parent_declaration) = index.declaration(declaration_id) else { - log::error!("bug: missing parent declaration"); - return None; - }; - let parent_index = parent_declaration.parent().and_then(|parent| { - add_signature(parent, declaration_to_signature_index, signatures, index) - }); - let (text, text_is_truncated) = parent_declaration.signature_text(); - let signature_index = signatures.len(); - signatures.push(Signature { - text: text.into(), - text_is_truncated, - parent_index, - range: parent_declaration.signature_line_range(), - }); - declaration_to_signature_index.insert(declaration_id, signature_index); - Some(signature_index) -} - -#[cfg(feature = "eval-support")] -pub type EvalCacheKey = (EvalCacheEntryKind, u64); - -#[cfg(feature = "eval-support")] -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum EvalCacheEntryKind { - Context, - Search, - Prediction, -} - -#[cfg(feature = "eval-support")] -impl std::fmt::Display for EvalCacheEntryKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - EvalCacheEntryKind::Search => write!(f, 
"search"), - EvalCacheEntryKind::Context => write!(f, "context"), - EvalCacheEntryKind::Prediction => write!(f, "prediction"), - } - } -} - -#[cfg(feature = "eval-support")] -pub trait EvalCache: Send + Sync { - fn read(&self, key: EvalCacheKey) -> Option; - fn write(&self, key: EvalCacheKey, input: &str, value: &str); -} - -#[cfg(test)] -mod tests { - use std::{path::Path, sync::Arc}; - - use client::UserStore; - use clock::FakeSystemClock; - use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; - use futures::{ - AsyncReadExt, StreamExt, - channel::{mpsc, oneshot}, - }; - use gpui::{ - Entity, TestAppContext, - http_client::{FakeHttpClient, Response}, - prelude::*, - }; - use indoc::indoc; - use language::OffsetRangeExt as _; - use open_ai::Usage; - use pretty_assertions::{assert_eq, assert_matches}; - use project::{FakeFs, Project}; - use serde_json::json; - use settings::SettingsStore; - use util::path; - use uuid::Uuid; - - use crate::{BufferEditPrediction, Zeta}; - - #[gpui::test] - async fn test_current_state(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree( - "/root", - json!({ - "1.txt": "Hello!\nHow\nBye\n", - "2.txt": "Hola!\nComo\nAdios\n" - }), - ) - .await; - let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - zeta.update(cx, |zeta, cx| { - zeta.register_project(&project, cx); - }); - - let buffer1 = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/1.txt"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - let snapshot1 = buffer1.read_with(cx, |buffer, _cx| buffer.snapshot()); - let position = snapshot1.anchor_before(language::Point::new(1, 3)); - - // Prediction for current file - - zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) - }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); - 
- respond_tx - .send(model_response(indoc! {r" - --- a/root/1.txt - +++ b/root/1.txt - @@ ... @@ - Hello! - -How - +How are you? - Bye - "})) - .unwrap(); - - cx.run_until_parked(); - - zeta.read_with(cx, |zeta, cx| { - let prediction = zeta - .current_prediction_for_buffer(&buffer1, &project, cx) - .unwrap(); - assert_matches!(prediction, BufferEditPrediction::Local { .. }); - }); - - // Context refresh - let refresh_task = zeta.update(cx, |zeta, cx| { - zeta.refresh_context(project.clone(), buffer1.clone(), position, cx) - }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); - respond_tx - .send(open_ai::Response { - id: Uuid::new_v4().to_string(), - object: "response".into(), - created: 0, - model: "model".into(), - choices: vec![open_ai::Choice { - index: 0, - message: open_ai::RequestMessage::Assistant { - content: None, - tool_calls: vec![open_ai::ToolCall { - id: "search".into(), - content: open_ai::ToolCallContent::Function { - function: open_ai::FunctionContent { - name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME - .to_string(), - arguments: serde_json::to_string(&SearchToolInput { - queries: Box::new([SearchToolQuery { - glob: "root/2.txt".to_string(), - syntax_node: vec![], - content: Some(".".into()), - }]), - }) - .unwrap(), - }, - }, - }], - }, - finish_reason: None, - }], - usage: Usage { - prompt_tokens: 0, - completion_tokens: 0, - total_tokens: 0, - }, - }) - .unwrap(); - refresh_task.await.unwrap(); - - zeta.update(cx, |zeta, _cx| { - zeta.discard_current_prediction(&project); - }); - - // Prediction for another file - zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) - }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); - respond_tx - .send(model_response(indoc! {r#" - --- a/root/2.txt - +++ b/root/2.txt - Hola! - -Como - +Como estas? 
- Adios - "#})) - .unwrap(); - cx.run_until_parked(); - - zeta.read_with(cx, |zeta, cx| { - let prediction = zeta - .current_prediction_for_buffer(&buffer1, &project, cx) - .unwrap(); - assert_matches!( - prediction, - BufferEditPrediction::Jump { prediction } if prediction.snapshot.file().unwrap().full_path(cx) == Path::new(path!("root/2.txt")) - ); - }); - - let buffer2 = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/2.txt"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - - zeta.read_with(cx, |zeta, cx| { - let prediction = zeta - .current_prediction_for_buffer(&buffer2, &project, cx) - .unwrap(); - assert_matches!(prediction, BufferEditPrediction::Local { .. }); - }); - } - - #[gpui::test] - async fn test_simple_request(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree( - "/root", - json!({ - "foo.md": "Hello!\nHow\nBye\n" - }), - ) - .await; - let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - let buffer = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); - let position = snapshot.anchor_before(language::Point::new(1, 3)); - - let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) - }); - - let (_, respond_tx) = req_rx.next().await.unwrap(); - - // TODO Put back when we have a structured request again - // assert_eq!( - // request.excerpt_path.as_ref(), - // Path::new(path!("root/foo.md")) - // ); - // assert_eq!( - // request.cursor_point, - // Point { - // line: Line(1), - // column: 3 - // } - // ); - - respond_tx - .send(model_response(indoc! { r" - --- a/root/foo.md - +++ b/root/foo.md - @@ ... @@ - Hello! - -How - +How are you? 
- Bye - "})) - .unwrap(); - - let prediction = prediction_task.await.unwrap().unwrap(); - - assert_eq!(prediction.edits.len(), 1); - assert_eq!( - prediction.edits[0].0.to_point(&snapshot).start, - language::Point::new(1, 3) - ); - assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); - } - - #[gpui::test] - async fn test_request_events(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree( - "/root", - json!({ - "foo.md": "Hello!\n\nBye\n" - }), - ) - .await; - let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - let buffer = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - - zeta.update(cx, |zeta, cx| { - zeta.register_buffer(&buffer, &project, cx); - }); - - buffer.update(cx, |buffer, cx| { - buffer.edit(vec![(7..7, "How")], None, cx); - }); - - let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); - let position = snapshot.anchor_before(language::Point::new(1, 3)); - - let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) - }); - - let (request, respond_tx) = req_rx.next().await.unwrap(); - - let prompt = prompt_from_request(&request); - assert!( - prompt.contains(indoc! {" - --- a/root/foo.md - +++ b/root/foo.md - @@ -1,3 +1,3 @@ - Hello! - - - +How - Bye - "}), - "{prompt}" - ); - - respond_tx - .send(model_response(indoc! {r#" - --- a/root/foo.md - +++ b/root/foo.md - @@ ... @@ - Hello! - -How - +How are you? 
- Bye - "#})) - .unwrap(); - - let prediction = prediction_task.await.unwrap().unwrap(); - - assert_eq!(prediction.edits.len(), 1); - assert_eq!( - prediction.edits[0].0.to_point(&snapshot).start, - language::Point::new(1, 3) - ); - assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); - } - - // Skipped until we start including diagnostics in prompt - // #[gpui::test] - // async fn test_request_diagnostics(cx: &mut TestAppContext) { - // let (zeta, mut req_rx) = init_test(cx); - // let fs = FakeFs::new(cx.executor()); - // fs.insert_tree( - // "/root", - // json!({ - // "foo.md": "Hello!\nBye" - // }), - // ) - // .await; - // let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - // let path_to_buffer_uri = lsp::Uri::from_file_path(path!("/root/foo.md")).unwrap(); - // let diagnostic = lsp::Diagnostic { - // range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)), - // severity: Some(lsp::DiagnosticSeverity::ERROR), - // message: "\"Hello\" deprecated. 
Use \"Hi\" instead".to_string(), - // ..Default::default() - // }; - - // project.update(cx, |project, cx| { - // project.lsp_store().update(cx, |lsp_store, cx| { - // // Create some diagnostics - // lsp_store - // .update_diagnostics( - // LanguageServerId(0), - // lsp::PublishDiagnosticsParams { - // uri: path_to_buffer_uri.clone(), - // diagnostics: vec![diagnostic], - // version: None, - // }, - // None, - // language::DiagnosticSourceKind::Pushed, - // &[], - // cx, - // ) - // .unwrap(); - // }); - // }); - - // let buffer = project - // .update(cx, |project, cx| { - // let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); - // project.open_buffer(path, cx) - // }) - // .await - // .unwrap(); - - // let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); - // let position = snapshot.anchor_before(language::Point::new(0, 0)); - - // let _prediction_task = zeta.update(cx, |zeta, cx| { - // zeta.request_prediction(&project, &buffer, position, cx) - // }); - - // let (request, _respond_tx) = req_rx.next().await.unwrap(); - - // assert_eq!(request.diagnostic_groups.len(), 1); - // let value = serde_json::from_str::(request.diagnostic_groups[0].0.get()) - // .unwrap(); - // // We probably don't need all of this. TODO define a specific diagnostic type in predict_edits_v3 - // assert_eq!( - // value, - // json!({ - // "entries": [{ - // "range": { - // "start": 8, - // "end": 10 - // }, - // "diagnostic": { - // "source": null, - // "code": null, - // "code_description": null, - // "severity": 1, - // "message": "\"Hello\" deprecated. 
Use \"Hi\" instead", - // "markdown": null, - // "group_id": 0, - // "is_primary": true, - // "is_disk_based": false, - // "is_unnecessary": false, - // "source_kind": "Pushed", - // "data": null, - // "underline": true - // } - // }], - // "primary_ix": 0 - // }) - // ); - // } - - fn model_response(text: &str) -> open_ai::Response { - open_ai::Response { - id: Uuid::new_v4().to_string(), - object: "response".into(), - created: 0, - model: "model".into(), - choices: vec![open_ai::Choice { - index: 0, - message: open_ai::RequestMessage::Assistant { - content: Some(open_ai::MessageContent::Plain(text.to_string())), - tool_calls: vec![], - }, - finish_reason: None, - }], - usage: Usage { - prompt_tokens: 0, - completion_tokens: 0, - total_tokens: 0, - }, - } - } - - fn prompt_from_request(request: &open_ai::Request) -> &str { - assert_eq!(request.messages.len(), 1); - let open_ai::RequestMessage::User { - content: open_ai::MessageContent::Plain(content), - .. - } = &request.messages[0] - else { - panic!( - "Request does not have single user message of type Plain. 
{:#?}", - request - ); - }; - content - } - - fn init_test( - cx: &mut TestAppContext, - ) -> ( - Entity, - mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, - ) { - cx.update(move |cx| { - let settings_store = SettingsStore::test(cx); - cx.set_global(settings_store); - zlog::init_test(); - - let (req_tx, req_rx) = mpsc::unbounded(); - - let http_client = FakeHttpClient::create({ - move |req| { - let uri = req.uri().path().to_string(); - let mut body = req.into_body(); - let req_tx = req_tx.clone(); - async move { - let resp = match uri.as_str() { - "/client/llm_tokens" => serde_json::to_string(&json!({ - "token": "test" - })) - .unwrap(), - "/predict_edits/raw" => { - let mut buf = Vec::new(); - body.read_to_end(&mut buf).await.ok(); - let req = serde_json::from_slice(&buf).unwrap(); - - let (res_tx, res_rx) = oneshot::channel(); - req_tx.unbounded_send((req, res_tx)).unwrap(); - serde_json::to_string(&res_rx.await?).unwrap() - } - _ => { - panic!("Unexpected path: {}", uri) - } - }; - - Ok(Response::builder().body(resp.into()).unwrap()) - } - } - }); - - let client = client::Client::new(Arc::new(FakeSystemClock::new()), http_client, cx); - client.cloud_client().set_credentials(1, "test".into()); - - language_model::init(client.clone(), cx); - - let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); - let zeta = Zeta::global(&client, &user_store, cx); - - (zeta, req_rx) - }) - } -} diff --git a/crates/zeta2_tools/Cargo.toml b/crates/zeta2_tools/Cargo.toml index 3a9b1ccbf9340dfdaa06030e59c2112b9cda6307..607e24c895d96de1464ff1bfa2a4dfa01c5d9669 100644 --- a/crates/zeta2_tools/Cargo.toml +++ b/crates/zeta2_tools/Cargo.toml @@ -13,7 +13,6 @@ path = "src/zeta2_tools.rs" [dependencies] anyhow.workspace = true -chrono.workspace = true client.workspace = true cloud_llm_client.workspace = true cloud_zeta2_prompt.workspace = true @@ -24,9 +23,7 @@ feature_flags.workspace = true futures.workspace = true gpui.workspace = true language.workspace = true 
-log.workspace = true multi_buffer.workspace = true -ordered-float.workspace = true project.workspace = true serde.workspace = true serde_json.workspace = true @@ -36,7 +33,7 @@ ui.workspace = true ui_input.workspace = true util.workspace = true workspace.workspace = true -zeta2.workspace = true +zeta.workspace = true [dev-dependencies] clap.workspace = true diff --git a/crates/zeta2_tools/src/zeta2_context_view.rs b/crates/zeta2_tools/src/zeta2_context_view.rs index 759d0d0a3da1adbd9e61fa05b5d305ca9de1f823..54f1ea2d813f7c00d30b12e341fb3e5ac3f155dc 100644 --- a/crates/zeta2_tools/src/zeta2_context_view.rs +++ b/crates/zeta2_tools/src/zeta2_context_view.rs @@ -25,7 +25,7 @@ use ui::{ v_flex, }; use workspace::Item; -use zeta2::{ +use zeta::{ Zeta, ZetaContextRetrievalDebugInfo, ZetaContextRetrievalStartedDebugInfo, ZetaDebugInfo, ZetaSearchQueryDebugInfo, }; diff --git a/crates/zeta2_tools/src/zeta2_tools.rs b/crates/zeta2_tools/src/zeta2_tools.rs index 8758857e7cf50d6a5f2e5a4ea509293b18a8cb2c..6a6268f68ad0fa10e2379ac21e07d4fa530dddc1 100644 --- a/crates/zeta2_tools/src/zeta2_tools.rs +++ b/crates/zeta2_tools/src/zeta2_tools.rs @@ -1,30 +1,26 @@ mod zeta2_context_view; -use std::{cmp::Reverse, path::PathBuf, str::FromStr, sync::Arc}; +use std::{str::FromStr, sync::Arc, time::Duration}; -use chrono::TimeDelta; use client::{Client, UserStore}; -use cloud_llm_client::predict_edits_v3::{ - DeclarationScoreComponents, PredictEditsRequest, PromptFormat, -}; +use cloud_llm_client::predict_edits_v3::PromptFormat; use collections::HashMap; -use editor::{Editor, EditorEvent, EditorMode, ExcerptRange, MultiBuffer}; +use editor::{Editor, EditorEvent, EditorMode, MultiBuffer}; use feature_flags::FeatureFlagAppExt as _; use futures::{FutureExt, StreamExt as _, channel::oneshot, future::Shared}; use gpui::{ - CursorStyle, Empty, Entity, EventEmitter, FocusHandle, Focusable, Subscription, Task, - WeakEntity, actions, prelude::*, + Empty, Entity, EventEmitter, FocusHandle, 
Focusable, Subscription, Task, WeakEntity, actions, + prelude::*, }; -use language::{Buffer, DiskState}; -use ordered_float::OrderedFloat; -use project::{Project, WorktreeId, telemetry_snapshot::TelemetrySnapshot}; +use language::Buffer; +use project::{Project, telemetry_snapshot::TelemetrySnapshot}; use ui::{ButtonLike, ContextMenu, ContextMenuEntry, DropdownMenu, KeyBinding, prelude::*}; use ui_input::InputField; -use util::{ResultExt, paths::PathStyle, rel_path::RelPath}; +use util::ResultExt; use workspace::{Item, SplitDirection, Workspace}; -use zeta2::{ - AgenticContextOptions, ContextMode, DEFAULT_SYNTAX_CONTEXT_OPTIONS, Zeta, Zeta2FeatureFlag, - ZetaDebugInfo, ZetaEditPredictionDebugInfo, ZetaOptions, +use zeta::{ + AgenticContextOptions, ContextMode, DEFAULT_SYNTAX_CONTEXT_OPTIONS, EditPredictionInputs, Zeta, + Zeta2FeatureFlag, ZetaDebugInfo, ZetaEditPredictionDebugInfo, ZetaOptions, }; use edit_prediction_context::{EditPredictionContextOptions, EditPredictionExcerptOptions}; @@ -99,7 +95,6 @@ pub struct Zeta2Inspector { cursor_context_ratio_input: Entity, max_prompt_bytes_input: Entity, context_mode: ContextModeState, - active_view: ActiveView, zeta: Entity, _active_editor_subscription: Option, _update_state_task: Task<()>, @@ -113,21 +108,14 @@ pub enum ContextModeState { }, } -#[derive(PartialEq)] -enum ActiveView { - Context, - Inference, -} - struct LastPrediction { - context_editor: Entity, prompt_editor: Entity, - retrieval_time: TimeDelta, - request_time: Option, + retrieval_time: Duration, + request_time: Option, buffer: WeakEntity, position: language::Anchor, state: LastPredictionState, - request: PredictEditsRequest, + inputs: EditPredictionInputs, project_snapshot: Shared>>, _task: Option>, } @@ -175,7 +163,6 @@ impl Zeta2Inspector { focus_handle: cx.focus_handle(), project: project.clone(), last_prediction: None, - active_view: ActiveView::Inference, max_excerpt_bytes_input: Self::number_input("Max Excerpt Bytes", window, cx), 
min_excerpt_bytes_input: Self::number_input("Min Excerpt Bytes", window, cx), cursor_context_ratio_input: Self::number_input("Cursor Context Ratio", window, cx), @@ -305,7 +292,7 @@ impl Zeta2Inspector { ContextMode::Syntax(context_options) => { let max_retrieved_declarations = match &this.context_mode { ContextModeState::Llm => { - zeta2::DEFAULT_SYNTAX_CONTEXT_OPTIONS.max_retrieved_declarations + zeta::DEFAULT_SYNTAX_CONTEXT_OPTIONS.max_retrieved_declarations } ContextModeState::Syntax { max_retrieved_declarations, @@ -340,22 +327,10 @@ impl Zeta2Inspector { fn update_last_prediction( &mut self, - prediction: zeta2::ZetaDebugInfo, + prediction: zeta::ZetaDebugInfo, window: &mut Window, cx: &mut Context, ) { - let project = self.project.read(cx); - let path_style = project.path_style(cx); - let Some(worktree_id) = project - .worktrees(cx) - .next() - .map(|worktree| worktree.read(cx).id()) - else { - log::error!("Open a worktree to use edit prediction debug view"); - self.last_prediction.take(); - return; - }; - self._update_state_task = cx.spawn_in(window, { let language_registry = self.project.read(cx).languages().clone(); async move |this, cx| { @@ -364,11 +339,10 @@ impl Zeta2Inspector { return; }; for ext in prediction - .request - .referenced_declarations + .inputs + .included_files .iter() - .filter_map(|snippet| snippet.path.extension()) - .chain(prediction.request.excerpt_path.extension()) + .filter_map(|file| file.path.extension()) { if !languages.contains_key(ext) { // Most snippets are gonna be the same language, @@ -391,90 +365,6 @@ impl Zeta2Inspector { let json_language = language_registry.language_for_name("Json").await.log_err(); this.update_in(cx, |this, window, cx| { - let context_editor = cx.new(|cx| { - let mut excerpt_score_components = HashMap::default(); - - let multibuffer = cx.new(|cx| { - let mut multibuffer = MultiBuffer::new(language::Capability::ReadOnly); - let excerpt_file = Arc::new(ExcerptMetadataFile { - title: 
RelPath::unix("Cursor Excerpt").unwrap().into(), - path_style, - worktree_id, - }); - - let excerpt_buffer = cx.new(|cx| { - let mut buffer = - Buffer::local(prediction.request.excerpt.clone(), cx); - if let Some(language) = prediction - .request - .excerpt_path - .extension() - .and_then(|ext| languages.get(ext)) - { - buffer.set_language(language.clone(), cx); - } - buffer.file_updated(excerpt_file, cx); - buffer - }); - - multibuffer.push_excerpts( - excerpt_buffer, - [ExcerptRange::new(text::Anchor::MIN..text::Anchor::MAX)], - cx, - ); - - let mut declarations = - prediction.request.referenced_declarations.clone(); - declarations.sort_unstable_by_key(|declaration| { - Reverse(OrderedFloat(declaration.declaration_score)) - }); - - for snippet in &declarations { - let snippet_file = Arc::new(ExcerptMetadataFile { - title: RelPath::unix(&format!( - "{} (Score: {})", - snippet.path.display(), - snippet.declaration_score - )) - .unwrap() - .into(), - path_style, - worktree_id, - }); - - let excerpt_buffer = cx.new(|cx| { - let mut buffer = Buffer::local(snippet.text.clone(), cx); - buffer.file_updated(snippet_file, cx); - if let Some(ext) = snippet.path.extension() - && let Some(language) = languages.get(ext) - { - buffer.set_language(language.clone(), cx); - } - buffer - }); - - let excerpt_ids = multibuffer.push_excerpts( - excerpt_buffer, - [ExcerptRange::new(text::Anchor::MIN..text::Anchor::MAX)], - cx, - ); - let excerpt_id = excerpt_ids.first().unwrap(); - - excerpt_score_components - .insert(*excerpt_id, snippet.score_components.clone()); - } - - multibuffer - }); - - let mut editor = - Editor::new(EditorMode::full(), multibuffer, None, window, cx); - editor.register_addon(ZetaContextAddon { - excerpt_score_components, - }); - editor - }); - let ZetaEditPredictionDebugInfo { response_rx, position, @@ -606,7 +496,6 @@ impl Zeta2Inspector { let project_snapshot_task = TelemetrySnapshot::new(&this.project, cx); this.last_prediction = Some(LastPrediction { - 
context_editor, prompt_editor: cx.new(|cx| { let buffer = cx.new(|cx| { let mut buffer = @@ -632,7 +521,7 @@ impl Zeta2Inspector { .foreground_executor() .spawn(async move { Arc::new(project_snapshot_task.await) }) .shared(), - request: prediction.request, + inputs: prediction.inputs, _task: Some(task), }); cx.notify(); @@ -664,9 +553,6 @@ impl Zeta2Inspector { let Some(last_prediction) = self.last_prediction.as_mut() else { return; }; - if !last_prediction.request.can_collect_data { - return; - } let project_snapshot_task = last_prediction.project_snapshot.clone(); @@ -718,7 +604,7 @@ impl Zeta2Inspector { id = request_id, kind = kind, text = text, - request = last_prediction.request, + request = last_prediction.inputs, project_snapshot = project_snapshot, ); }) @@ -727,17 +613,6 @@ impl Zeta2Inspector { .detach(); } - fn focus_feedback(&mut self, window: &mut Window, cx: &mut Context) { - if let Some(last_prediction) = self.last_prediction.as_mut() { - if let LastPredictionState::Success { - feedback_editor, .. 
- } = &mut last_prediction.state - { - feedback_editor.focus_handle(cx).focus(window); - } - }; - } - fn render_options(&self, window: &mut Window, cx: &mut Context) -> Div { v_flex() .gap_2() @@ -747,11 +622,11 @@ impl Zeta2Inspector { .justify_between() .child( ui::Button::new("reset-options", "Reset") - .disabled(self.zeta.read(cx).options() == &zeta2::DEFAULT_OPTIONS) + .disabled(self.zeta.read(cx).options() == &zeta::DEFAULT_OPTIONS) .style(ButtonStyle::Outlined) .size(ButtonSize::Large) .on_click(cx.listener(|this, _, window, cx| { - this.set_options_state(&zeta2::DEFAULT_OPTIONS, window, cx); + this.set_options_state(&zeta::DEFAULT_OPTIONS, window, cx); })), ), ) @@ -915,42 +790,6 @@ impl Zeta2Inspector { ) } - fn render_tabs(&self, cx: &mut Context) -> Option { - if self.last_prediction.is_none() { - return None; - }; - - Some( - ui::ToggleButtonGroup::single_row( - "prediction", - [ - ui::ToggleButtonSimple::new( - "Context", - cx.listener(|this, _, _, cx| { - this.active_view = ActiveView::Context; - cx.notify(); - }), - ), - ui::ToggleButtonSimple::new( - "Inference", - cx.listener(|this, _, window, cx| { - this.active_view = ActiveView::Inference; - this.focus_feedback(window, cx); - cx.notify(); - }), - ), - ], - ) - .style(ui::ToggleButtonGroupStyle::Outlined) - .selected_index(if self.active_view == ActiveView::Context { - 0 - } else { - 1 - }) - .into_any_element(), - ) - } - fn render_stats(&self) -> Option
{ let Some(prediction) = self.last_prediction.as_ref() else { return None; @@ -970,15 +809,15 @@ impl Zeta2Inspector { ) } - fn render_duration(name: &'static str, time: Option) -> Div { + fn render_duration(name: &'static str, time: Option) -> Div { h_flex() .gap_1() .child(Label::new(name).color(Color::Muted).size(LabelSize::Small)) .child(match time { - Some(time) => Label::new(if time.num_microseconds().unwrap_or(0) >= 1000 { - format!("{} ms", time.num_milliseconds()) + Some(time) => Label::new(if time.as_micros() >= 1000 { + format!("{} ms", time.as_millis()) } else { - format!("{} µs", time.num_microseconds().unwrap_or(0)) + format!("{} µs", time.as_micros()) }) .size(LabelSize::Small), None => Label::new("...").size(LabelSize::Small), @@ -1006,144 +845,135 @@ impl Zeta2Inspector { } fn render_last_prediction(&self, prediction: &LastPrediction, cx: &mut Context) -> Div { - match &self.active_view { - ActiveView::Context => div().size_full().child(prediction.context_editor.clone()), - ActiveView::Inference => h_flex() - .items_start() - .w_full() - .flex_1() - .border_t_1() - .border_color(cx.theme().colors().border) - .bg(cx.theme().colors().editor_background) - .child( - v_flex() - .flex_1() - .gap_2() - .p_4() - .h_full() - .child( - h_flex() - .justify_between() - .child(ui::Headline::new("Prompt").size(ui::HeadlineSize::XSmall)) - .child(match prediction.state { - LastPredictionState::Requested - | LastPredictionState::Failed { .. } => ui::Chip::new("Local") - .bg_color(cx.theme().status().warning_background) - .label_color(Color::Success), - LastPredictionState::Success { .. 
} => ui::Chip::new("Cloud") - .bg_color(cx.theme().status().success_background) - .label_color(Color::Success), - }), - ) - .child(prediction.prompt_editor.clone()), - ) - .child(ui::vertical_divider()) - .child( - v_flex() - .flex_1() - .gap_2() - .h_full() - .child( + h_flex() + .items_start() + .w_full() + .flex_1() + .border_t_1() + .border_color(cx.theme().colors().border) + .bg(cx.theme().colors().editor_background) + .child( + v_flex() + .flex_1() + .gap_2() + .p_4() + .h_full() + .child( + h_flex() + .justify_between() + .child(ui::Headline::new("Prompt").size(ui::HeadlineSize::XSmall)) + .child(match prediction.state { + LastPredictionState::Requested + | LastPredictionState::Failed { .. } => ui::Chip::new("Local") + .bg_color(cx.theme().status().warning_background) + .label_color(Color::Success), + LastPredictionState::Success { .. } => ui::Chip::new("Cloud") + .bg_color(cx.theme().status().success_background) + .label_color(Color::Success), + }), + ) + .child(prediction.prompt_editor.clone()), + ) + .child(ui::vertical_divider()) + .child( + v_flex() + .flex_1() + .gap_2() + .h_full() + .child( + v_flex() + .flex_1() + .gap_2() + .p_4() + .child( + ui::Headline::new("Model Response").size(ui::HeadlineSize::XSmall), + ) + .child(match &prediction.state { + LastPredictionState::Success { + model_response_editor, + .. + } => model_response_editor.clone().into_any_element(), + LastPredictionState::Requested => v_flex() + .gap_2() + .child(Label::new("Loading...").buffer_font(cx)) + .into_any_element(), + LastPredictionState::Failed { message } => v_flex() + .gap_2() + .max_w_96() + .child(Label::new(message.clone()).buffer_font(cx)) + .into_any_element(), + }), + ) + .child(ui::divider()) + .child( + if let LastPredictionState::Success { + feedback_editor, + feedback: feedback_state, + .. 
+ } = &prediction.state + { v_flex() - .flex_1() + .key_context("Zeta2Feedback") + .on_action(cx.listener(Self::handle_rate_positive)) + .on_action(cx.listener(Self::handle_rate_negative)) .gap_2() - .p_4() + .p_2() + .child(feedback_editor.clone()) .child( - ui::Headline::new("Model Response") - .size(ui::HeadlineSize::XSmall), - ) - .child(match &prediction.state { - LastPredictionState::Success { - model_response_editor, - .. - } => model_response_editor.clone().into_any_element(), - LastPredictionState::Requested => v_flex() - .gap_2() - .child(Label::new("Loading...").buffer_font(cx)) - .into_any_element(), - LastPredictionState::Failed { message } => v_flex() - .gap_2() - .max_w_96() - .child(Label::new(message.clone()).buffer_font(cx)) - .into_any_element(), - }), - ) - .child(ui::divider()) - .child( - if prediction.request.can_collect_data - && let LastPredictionState::Success { - feedback_editor, - feedback: feedback_state, - .. - } = &prediction.state - { - v_flex() - .key_context("Zeta2Feedback") - .on_action(cx.listener(Self::handle_rate_positive)) - .on_action(cx.listener(Self::handle_rate_negative)) - .gap_2() - .p_2() - .child(feedback_editor.clone()) - .child( - h_flex() - .justify_end() - .w_full() - .child( - ButtonLike::new("rate-positive") - .when( - *feedback_state == Some(Feedback::Positive), - |this| this.style(ButtonStyle::Filled), - ) - .child( - KeyBinding::for_action( - &Zeta2RatePredictionPositive, - cx, - ) - .size(TextSize::Small.rems(cx)), - ) - .child(ui::Icon::new(ui::IconName::ThumbsUp)) - .on_click(cx.listener( - |this, _, window, cx| { - this.handle_rate_positive( - &Zeta2RatePredictionPositive, - window, - cx, - ); - }, - )), - ) - .child( - ButtonLike::new("rate-negative") - .when( - *feedback_state == Some(Feedback::Negative), - |this| this.style(ButtonStyle::Filled), + h_flex() + .justify_end() + .w_full() + .child( + ButtonLike::new("rate-positive") + .when( + *feedback_state == Some(Feedback::Positive), + |this| 
this.style(ButtonStyle::Filled), + ) + .child( + KeyBinding::for_action( + &Zeta2RatePredictionPositive, + cx, ) - .child( - KeyBinding::for_action( - &Zeta2RatePredictionNegative, - cx, - ) - .size(TextSize::Small.rems(cx)), + .size(TextSize::Small.rems(cx)), + ) + .child(ui::Icon::new(ui::IconName::ThumbsUp)) + .on_click(cx.listener(|this, _, window, cx| { + this.handle_rate_positive( + &Zeta2RatePredictionPositive, + window, + cx, + ); + })), + ) + .child( + ButtonLike::new("rate-negative") + .when( + *feedback_state == Some(Feedback::Negative), + |this| this.style(ButtonStyle::Filled), + ) + .child( + KeyBinding::for_action( + &Zeta2RatePredictionNegative, + cx, ) - .child(ui::Icon::new(ui::IconName::ThumbsDown)) - .on_click(cx.listener( - |this, _, window, cx| { - this.handle_rate_negative( - &Zeta2RatePredictionNegative, - window, - cx, - ); - }, - )), - ), - ) - .into_any() - } else { - Empty.into_any_element() - }, - ), - ), - } + .size(TextSize::Small.rems(cx)), + ) + .child(ui::Icon::new(ui::IconName::ThumbsDown)) + .on_click(cx.listener(|this, _, window, cx| { + this.handle_rate_negative( + &Zeta2RatePredictionNegative, + window, + cx, + ); + })), + ), + ) + .into_any() + } else { + Empty.into_any_element() + }, + ), + ) } } @@ -1178,8 +1008,7 @@ impl Render for Zeta2Inspector { .h_full() .justify_between() .child(self.render_options(window, cx)) - .gap_4() - .children(self.render_tabs(cx)), + .gap_4(), ) .child(ui::vertical_divider()) .children(self.render_stats()), @@ -1187,104 +1016,3 @@ impl Render for Zeta2Inspector { .child(self.render_content(window, cx)) } } - -// Using same approach as commit view - -struct ExcerptMetadataFile { - title: Arc, - worktree_id: WorktreeId, - path_style: PathStyle, -} - -impl language::File for ExcerptMetadataFile { - fn as_local(&self) -> Option<&dyn language::LocalFile> { - None - } - - fn disk_state(&self) -> DiskState { - DiskState::New - } - - fn path(&self) -> &Arc { - &self.title - } - - fn full_path(&self, _: 
&App) -> PathBuf { - self.title.as_std_path().to_path_buf() - } - - fn file_name<'a>(&'a self, _: &'a App) -> &'a str { - self.title.file_name().unwrap() - } - - fn path_style(&self, _: &App) -> PathStyle { - self.path_style - } - - fn worktree_id(&self, _: &App) -> WorktreeId { - self.worktree_id - } - - fn to_proto(&self, _: &App) -> language::proto::File { - unimplemented!() - } - - fn is_private(&self) -> bool { - false - } -} - -struct ZetaContextAddon { - excerpt_score_components: HashMap, -} - -impl editor::Addon for ZetaContextAddon { - fn to_any(&self) -> &dyn std::any::Any { - self - } - - fn render_buffer_header_controls( - &self, - excerpt_info: &multi_buffer::ExcerptInfo, - _window: &Window, - _cx: &App, - ) -> Option { - let score_components = self.excerpt_score_components.get(&excerpt_info.id)?.clone(); - - Some( - div() - .id(excerpt_info.id.to_proto() as usize) - .child(ui::Icon::new(IconName::Info)) - .cursor(CursorStyle::PointingHand) - .tooltip(move |_, cx| { - cx.new(|_| ScoreComponentsTooltip::new(&score_components)) - .into() - }) - .into_any(), - ) - } -} - -struct ScoreComponentsTooltip { - text: SharedString, -} - -impl ScoreComponentsTooltip { - fn new(components: &DeclarationScoreComponents) -> Self { - Self { - text: format!("{:#?}", components).into(), - } - } -} - -impl Render for ScoreComponentsTooltip { - fn render(&mut self, _: &mut Window, cx: &mut Context) -> impl IntoElement { - div().pl_2().pt_2p5().child( - div() - .elevation_2(cx) - .py_1() - .px_2() - .child(ui::Label::new(self.text.clone()).buffer_font(cx)), - ) - } -} diff --git a/crates/zeta_cli/Cargo.toml b/crates/zeta_cli/Cargo.toml index e18cf54787ca98e2be60db4977dd2de18e9c09e2..2dbca537f55377e84f306e13649dfb71ccf2f181 100644 --- a/crates/zeta_cli/Cargo.toml +++ b/crates/zeta_cli/Cargo.toml @@ -53,8 +53,7 @@ terminal_view.workspace = true toml.workspace = true util.workspace = true watch.workspace = true -zeta.workspace = true -zeta2 = { workspace = true, features = 
["eval-support"] } +zeta = { workspace = true, features = ["eval-support"] } zlog.workspace = true [dev-dependencies] diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index a9d7acaee2287450eac828bd2d770b88a8150940..a0ebdf998595ccacec2dafecf51b6094e5e401b5 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -9,7 +9,7 @@ use collections::HashSet; use gpui::{AsyncApp, Entity}; use project::Project; use util::ResultExt as _; -use zeta2::{Zeta, udiff::DiffLine}; +use zeta::{Zeta, udiff::DiffLine}; use crate::{ EvaluateArguments, PredictionOptions, diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 67eed23f90dc1a5b48a53a2a7de07f500396ba9f..7dbe304a88b9ea024adab793fa782fd2f4bdf1c0 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -26,7 +26,7 @@ use project::{Project, ProjectPath}; use pulldown_cmark::CowStr; use serde::{Deserialize, Serialize}; use util::{paths::PathStyle, rel_path::RelPath}; -use zeta2::udiff::OpenedBuffers; +use zeta::udiff::OpenedBuffers; use crate::paths::{REPOS_DIR, WORKTREES_DIR}; @@ -557,7 +557,7 @@ impl NamedExample { project: &Entity, cx: &mut AsyncApp, ) -> Result> { - zeta2::udiff::apply_diff(&self.example.edit_history, project, cx).await + zeta::udiff::apply_diff(&self.example.edit_history, project, cx).await } } diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 914b141915cd3a89cd35a02bc6c9463094f0de96..f87563cc34ca7631baf8195e42e4e3473f522659 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -31,7 +31,7 @@ use serde_json::json; use std::io::{self}; use std::time::Duration; use std::{collections::HashSet, path::PathBuf, str::FromStr, sync::Arc}; -use zeta2::ContextMode; +use zeta::ContextMode; #[derive(Parser, Debug)] #[command(name = "zeta")] @@ -193,13 +193,14 @@ pub struct EvaluateArguments { #[derive(clap::ValueEnum, Default, Debug, Clone, Copy, PartialEq)] 
enum PredictionProvider { + Zeta1, #[default] Zeta2, Sweep, } -fn zeta2_args_to_options(args: &Zeta2Args, omit_excerpt_overlaps: bool) -> zeta2::ZetaOptions { - zeta2::ZetaOptions { +fn zeta2_args_to_options(args: &Zeta2Args, omit_excerpt_overlaps: bool) -> zeta::ZetaOptions { + zeta::ZetaOptions { context: ContextMode::Syntax(EditPredictionContextOptions { max_retrieved_declarations: args.max_retrieved_definitions, use_imports: !args.disable_imports_gathering, @@ -397,7 +398,7 @@ async fn zeta2_syntax_context( let output = cx .update(|cx| { let zeta = cx.new(|cx| { - zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) + zeta::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) }); let indexing_done_task = zeta.update(cx, |zeta, cx| { zeta.set_options(zeta2_args_to_options(&args.zeta2_args, true)); @@ -435,7 +436,7 @@ async fn zeta1_context( args: ContextArgs, app_state: &Arc, cx: &mut AsyncApp, -) -> Result { +) -> Result { let LoadedContext { full_path_str, snapshot, @@ -450,7 +451,7 @@ async fn zeta1_context( let prompt_for_events = move || (events, 0); cx.update(|cx| { - zeta::gather_context( + zeta::zeta1::gather_context( full_path_str, &snapshot, clipped_cursor, diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index c792b318cec6de42e518793ed5400df0010ae5ea..a757a5faa0dbae95c4dcab58c76d50450b1d2e9f 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -21,7 +21,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; -use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; +use zeta::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; pub async fn run_predict( args: PredictArguments, @@ -47,12 +47,13 @@ pub fn setup_zeta( cx: &mut AsyncApp, ) -> Result> { let zeta = - cx.new(|cx| zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx))?; + cx.new(|cx| 
zeta::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx))?; zeta.update(cx, |zeta, _cx| { let model = match provider { - PredictionProvider::Zeta2 => zeta2::ZetaEditPredictionModel::ZedCloud, - PredictionProvider::Sweep => zeta2::ZetaEditPredictionModel::Sweep, + PredictionProvider::Zeta1 => zeta::ZetaEditPredictionModel::Zeta1, + PredictionProvider::Zeta2 => zeta::ZetaEditPredictionModel::Zeta2, + PredictionProvider::Sweep => zeta::ZetaEditPredictionModel::Sweep, }; zeta.set_edit_prediction_model(model); })?; @@ -142,25 +143,25 @@ pub async fn perform_predict( let mut search_queries_executed_at = None; while let Some(event) = debug_rx.next().await { match event { - zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { + zeta::ZetaDebugInfo::ContextRetrievalStarted(info) => { start_time = Some(info.timestamp); fs::write( example_run_dir.join("search_prompt.md"), &info.search_prompt, )?; } - zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { + zeta::ZetaDebugInfo::SearchQueriesGenerated(info) => { search_queries_generated_at = Some(info.timestamp); fs::write( example_run_dir.join("search_queries.json"), serde_json::to_string_pretty(&info.search_queries).unwrap(), )?; } - zeta2::ZetaDebugInfo::SearchQueriesExecuted(info) => { + zeta::ZetaDebugInfo::SearchQueriesExecuted(info) => { search_queries_executed_at = Some(info.timestamp); } - zeta2::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} - zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { + zeta::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} + zeta::ZetaDebugInfo::EditPredictionRequested(request) => { let prediction_started_at = Instant::now(); start_time.get_or_insert(prediction_started_at); let prompt = request.local_prompt.unwrap_or_default(); @@ -170,9 +171,9 @@ pub async fn perform_predict( let mut result = result.lock().unwrap(); result.prompt_len = prompt.chars().count(); - for included_file in request.request.included_files { + for included_file in 
request.inputs.included_files { let insertions = - vec![(request.request.cursor_point, CURSOR_MARKER)]; + vec![(request.inputs.cursor_point, CURSOR_MARKER)]; result.excerpts.extend(included_file.excerpts.iter().map( |excerpt| ActualExcerpt { path: included_file.path.components().skip(1).collect(), @@ -182,7 +183,7 @@ pub async fn perform_predict( write_codeblock( &included_file.path, included_file.excerpts.iter(), - if included_file.path == request.request.excerpt_path { + if included_file.path == request.inputs.cursor_path { &insertions } else { &[] @@ -196,7 +197,7 @@ pub async fn perform_predict( let response = request.response_rx.await?.0.map_err(|err| anyhow!(err))?; - let response = zeta2::text_from_response(response).unwrap_or_default(); + let response = zeta::text_from_response(response).unwrap_or_default(); let prediction_finished_at = Instant::now(); fs::write(example_run_dir.join("prediction_response.md"), &response)?; @@ -267,20 +268,7 @@ pub async fn perform_predict( let mut result = Arc::into_inner(result).unwrap().into_inner().unwrap(); result.diff = prediction - .map(|prediction| { - let old_text = prediction.snapshot.text(); - let new_text = prediction - .buffer - .update(cx, |buffer, cx| { - let branch = buffer.branch(cx); - branch.update(cx, |branch, cx| { - branch.edit(prediction.edits.iter().cloned(), None, cx); - branch.text() - }) - }) - .unwrap(); - language::unified_diff(&old_text, &new_text) - }) + .and_then(|prediction| prediction.edit_preview.as_unified_diff(&prediction.edits)) .unwrap_or_default(); anyhow::Ok(result) diff --git a/crates/zeta_cli/src/syntax_retrieval_stats.rs b/crates/zeta_cli/src/syntax_retrieval_stats.rs index f2634b1323d92b7136c591627226161b2905a955..4c7506ff78952da79acfeae751959bfe8182b9d4 100644 --- a/crates/zeta_cli/src/syntax_retrieval_stats.rs +++ b/crates/zeta_cli/src/syntax_retrieval_stats.rs @@ -32,7 +32,7 @@ use std::{ time::Duration, }; use util::paths::PathStyle; -use zeta2::ContextMode; +use 
zeta::ContextMode; use crate::headless::ZetaCliAppState; use crate::source_location::SourceLocation; @@ -44,7 +44,7 @@ pub async fn retrieval_stats( only_extension: Option, file_limit: Option, skip_files: Option, - options: zeta2::ZetaOptions, + options: zeta::ZetaOptions, cx: &mut AsyncApp, ) -> Result { let ContextMode::Syntax(context_options) = options.context.clone() else { From 0e2041dd41a8c7252b53f1ae8772d3a8f1e7e169 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ole=20J=C3=B8rgen=20Br=C3=B8nner?= Date: Tue, 25 Nov 2025 08:21:18 +0100 Subject: [PATCH 012/749] multi_buffer: Fix `editor::ExpandExcerpts` failing when cursor is at excerpt start (#42324) The bug is easily verified by: 1. open any multi-buffer 2. place the cursor at the beginning of an excerpt 3. run the editor::ExpandExcerpts / editor: expand excerpts action 4. The excerpt is not expanded Since the `buffer_ids_for_range` function basically did the same and had even been changed the same way earlier I DRYed these functions as well. 
Note: I'm a rust novice, so keep an extra eye on rust technicalities when reviewing :) --- Release Notes: - Fix editor: expand excerpts failing when cursor is at excerpt start --------- Co-authored-by: Lukas Wirth --- crates/multi_buffer/src/multi_buffer.rs | 32 +++-- crates/multi_buffer/src/multi_buffer_tests.rs | 114 ++++++++++++++++++ 2 files changed, 129 insertions(+), 17 deletions(-) diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 7ecc09255b17ebbf2e68e21ab4c8d88f93d08d75..93fa26e02936884bc4b9dfd19bdea37455f1fd6e 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -3616,40 +3616,38 @@ impl MultiBufferSnapshot { }) } - pub fn excerpt_ids_for_range( + fn excerpts_for_range( &self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut cursor = self.cursor::(); cursor.seek(&range.start); std::iter::from_fn(move || { let region = cursor.region()?; - if region.range.start >= range.end { + if region.range.start > range.end + || region.range.start == range.end && region.range.start > range.start + { return None; } cursor.next_excerpt(); - Some(region.excerpt.id) + Some(region.excerpt) }) } + pub fn excerpt_ids_for_range( + &self, + range: Range, + ) -> impl Iterator + '_ { + self.excerpts_for_range(range).map(|excerpt| excerpt.id) + } + pub fn buffer_ids_for_range( &self, range: Range, ) -> impl Iterator + '_ { - let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut cursor = self.cursor::(); - cursor.seek(&range.start); - std::iter::from_fn(move || { - let region = cursor.region()?; - if region.range.start > range.end - || region.range.start == range.end && region.range.start > range.start - { - return None; - } - cursor.next_excerpt(); - Some(region.excerpt.buffer_id) - }) + self.excerpts_for_range(range) + .map(|excerpt| excerpt.buffer_id) } pub fn 
ranges_to_buffer_ranges( diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 0151805d065b779569b3a2f8f02157f3ce129295..526c77db85a3efabb0e64b184dbed6fa90097558 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -4095,3 +4095,117 @@ fn test_random_chunk_bitmaps_with_diffs(cx: &mut App, mut rng: StdRng) { } } } + +/// Tests `excerpt_containing` and `excerpts_for_range` (functions mapping multi-buffer text-coordinates to excerpts) +#[gpui::test] +fn test_excerpts_containment_functions(cx: &mut App) { + // Multibuffer content for these tests: + // 0123 + // 0: aa0 + // 1: aa1 + // ----- + // 2: bb0 + // 3: bb1 + // -----MultiBufferOffset(0).. + // 4: cc0 + + let buffer_1 = cx.new(|cx| Buffer::local("aa0\naa1", cx)); + let buffer_2 = cx.new(|cx| Buffer::local("bb0\nbb1", cx)); + let buffer_3 = cx.new(|cx| Buffer::local("cc0", cx)); + + let multibuffer = cx.new(|_| MultiBuffer::new(Capability::ReadWrite)); + + let (excerpt_1_id, excerpt_2_id, excerpt_3_id) = multibuffer.update(cx, |multibuffer, cx| { + let excerpt_1_id = multibuffer.push_excerpts( + buffer_1.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 3))], + cx, + )[0]; + + let excerpt_2_id = multibuffer.push_excerpts( + buffer_2.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 3))], + cx, + )[0]; + + let excerpt_3_id = multibuffer.push_excerpts( + buffer_3.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(0, 3))], + cx, + )[0]; + + (excerpt_1_id, excerpt_2_id, excerpt_3_id) + }); + + let snapshot = multibuffer.read(cx).snapshot(cx); + + assert_eq!(snapshot.text(), "aa0\naa1\nbb0\nbb1\ncc0"); + + //// Test `excerpts_for_range` + + let p00 = snapshot.point_to_offset(Point::new(0, 0)); + let p10 = snapshot.point_to_offset(Point::new(1, 0)); + let p20 = snapshot.point_to_offset(Point::new(2, 0)); + let p23 = snapshot.point_to_offset(Point::new(2, 3)); + let p13 
= snapshot.point_to_offset(Point::new(1, 3)); + let p40 = snapshot.point_to_offset(Point::new(4, 0)); + let p43 = snapshot.point_to_offset(Point::new(4, 3)); + + let excerpts: Vec<_> = snapshot.excerpts_for_range(p00..p00).collect(); + assert_eq!(excerpts.len(), 1); + assert_eq!(excerpts[0].id, excerpt_1_id); + + // Cursor at very end of excerpt 3 + let excerpts: Vec<_> = snapshot.excerpts_for_range(p43..p43).collect(); + assert_eq!(excerpts.len(), 1); + assert_eq!(excerpts[0].id, excerpt_3_id); + + let excerpts: Vec<_> = snapshot.excerpts_for_range(p00..p23).collect(); + assert_eq!(excerpts.len(), 2); + assert_eq!(excerpts[0].id, excerpt_1_id); + assert_eq!(excerpts[1].id, excerpt_2_id); + + // This range represent an selection with end-point just inside excerpt_2 + // Today we only expand the first excerpt, but another interpretation that + // we could consider is expanding both here + let excerpts: Vec<_> = snapshot.excerpts_for_range(p10..p20).collect(); + assert_eq!(excerpts.len(), 1); + assert_eq!(excerpts[0].id, excerpt_1_id); + + //// Test that `excerpts_for_range` and `excerpt_containing` agree for all single offsets (cursor positions) + for offset in 0..=snapshot.len().0 { + let offset = MultiBufferOffset(offset); + let excerpts_for_range: Vec<_> = snapshot.excerpts_for_range(offset..offset).collect(); + assert_eq!( + excerpts_for_range.len(), + 1, + "Expected exactly one excerpt for offset {offset}", + ); + + let excerpt_containing = snapshot.excerpt_containing(offset..offset); + assert!( + excerpt_containing.is_some(), + "Expected excerpt_containing to find excerpt for offset {offset}", + ); + + assert_eq!( + excerpts_for_range[0].id, + excerpt_containing.unwrap().id(), + "excerpts_for_range and excerpt_containing should agree for offset {offset}", + ); + } + + //// Test `excerpt_containing` behavior with ranges: + + // Ranges intersecting a single-excerpt + let containing = snapshot.excerpt_containing(p00..p13); + assert!(containing.is_some()); + 
assert_eq!(containing.unwrap().id(), excerpt_1_id); + + // Ranges intersecting multiple excerpts (should return None) + let containing = snapshot.excerpt_containing(p20..p40); + assert!( + containing.is_none(), + "excerpt_containing should return None for ranges spanning multiple excerpts" + ); +} From 303c23cf1e7ce6226f99dd989b9173c3799622a2 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 25 Nov 2025 09:34:23 +0200 Subject: [PATCH 013/749] Fix first window open not focusing the modals (#43180) Closes https://github.com/zed-industries/zed/issues/4357 Closes https://github.com/zed-industries/zed/issues/41278 Release Notes: - Fixed modals not getting focus on window reopen --------- Co-authored-by: Conrad Irwin --- crates/agent_ui/src/acp/thread_view.rs | 1 - crates/agent_ui/src/agent_panel.rs | 20 +++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 92765140f5101034a30fc95db675ff335f2cb324..fd0b1eedbdf80d1893760e6182cd2e57d96ef010 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -653,7 +653,6 @@ impl AcpThreadView { mode_selector, _subscriptions: subscriptions, }; - this.message_editor.focus_handle(cx).focus(window); this.profile_selector = this.as_native_thread(cx).map(|thread| { cx.new(|cx| { diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 6ff909389986ec27b998c4554fe2d86115ef1785..22eb11e24a8fd706c80aa65c3dcf5d8ae3876ddc 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -816,6 +816,7 @@ impl AgentPanel { window, cx, ), + true, window, cx, ); @@ -911,7 +912,12 @@ impl AgentPanel { ) }); - this.set_active_view(ActiveView::ExternalAgentThread { thread_view }, window, cx); + this.set_active_view( + ActiveView::ExternalAgentThread { thread_view }, + !loading, + window, + cx, + ); }) }) .detach_and_log_err(cx); 
@@ -953,10 +959,10 @@ impl AgentPanel { fn open_history(&mut self, window: &mut Window, cx: &mut Context) { if matches!(self.active_view, ActiveView::History) { if let Some(previous_view) = self.previous_view.take() { - self.set_active_view(previous_view, window, cx); + self.set_active_view(previous_view, true, window, cx); } } else { - self.set_active_view(ActiveView::History, window, cx); + self.set_active_view(ActiveView::History, true, window, cx); } cx.notify(); } @@ -1012,6 +1018,7 @@ impl AgentPanel { window, cx, ), + true, window, cx, ); @@ -1157,7 +1164,7 @@ impl AgentPanel { let context_server_store = self.project.read(cx).context_server_store(); let fs = self.fs.clone(); - self.set_active_view(ActiveView::Configuration, window, cx); + self.set_active_view(ActiveView::Configuration, true, window, cx); self.configuration = Some(cx.new(|cx| { AgentConfiguration::new( fs, @@ -1274,6 +1281,7 @@ impl AgentPanel { fn set_active_view( &mut self, new_view: ActiveView, + focus: bool, window: &mut Window, cx: &mut Context, ) { @@ -1312,7 +1320,9 @@ impl AgentPanel { self.active_view = new_view; } - self.focus_handle(cx).focus(window); + if focus { + self.focus_handle(cx).focus(window); + } } fn populate_recently_opened_menu_section( From e6fe95b4f2f676c7fc4a5f951ba7c721e7d22e8a Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 25 Nov 2025 10:25:49 +0200 Subject: [PATCH 014/749] Only show ssh logs when toggled (#43445) Same as in collab projects. 
Release Notes: - N/A --- crates/project/src/lsp_store/log_store.rs | 44 +++++++++++--------- crates/remote_server/src/headless_project.rs | 40 +++++++++--------- 2 files changed, 45 insertions(+), 39 deletions(-) diff --git a/crates/project/src/lsp_store/log_store.rs b/crates/project/src/lsp_store/log_store.rs index 00098712bf0092a6795de2ed48c7ccf15925c555..92f8fecadd0236e899ef16781e55405dfe05f282 100644 --- a/crates/project/src/lsp_store/log_store.rs +++ b/crates/project/src/lsp_store/log_store.rs @@ -344,22 +344,7 @@ impl LogStore { enabled, toggled_log_kind, } => { - if let Some(server_state) = - log_store.get_language_server_state(*server_id) - { - if *enabled { - server_state.toggled_log_kind = Some(*toggled_log_kind); - } else { - server_state.toggled_log_kind = None; - } - } - if LogKind::Rpc == *toggled_log_kind { - if *enabled { - log_store.enable_rpc_trace_for_language_server(*server_id); - } else { - log_store.disable_rpc_trace_for_language_server(*server_id); - } - } + log_store.toggle_lsp_logs(*server_id, *enabled, *toggled_log_kind); } _ => {} } @@ -676,7 +661,6 @@ impl LogStore { } fn emit_event(&mut self, e: Event, cx: &mut Context) { - let on_headless_host = self.on_headless_host; match &e { Event::NewServerLogEntry { id, kind, text } => { if let Some(state) = self.get_language_server_state(*id) { @@ -690,9 +674,7 @@ impl LogStore { } .and_then(|lsp_store| lsp_store.read(cx).downstream_client()); if let Some((client, project_id)) = downstream_client { - if on_headless_host - || Some(LogKind::from_server_log_type(kind)) == state.toggled_log_kind - { + if Some(LogKind::from_server_log_type(kind)) == state.toggled_log_kind { client .send(proto::LanguageServerLog { project_id, @@ -709,4 +691,26 @@ impl LogStore { cx.emit(e); } + + pub fn toggle_lsp_logs( + &mut self, + server_id: LanguageServerId, + enabled: bool, + toggled_log_kind: LogKind, + ) { + if let Some(server_state) = self.get_language_server_state(server_id) { + if enabled { + 
server_state.toggled_log_kind = Some(toggled_log_kind); + } else { + server_state.toggled_log_kind = None; + } + } + if LogKind::Rpc == toggled_log_kind { + if enabled { + self.enable_rpc_trace_for_language_server(server_id); + } else { + self.disable_rpc_trace_for_language_server(server_id); + } + } + } } diff --git a/crates/remote_server/src/headless_project.rs b/crates/remote_server/src/headless_project.rs index 6d64202a038145fc32dc5e5896484e23f03dacef..f5cce907f956d7127aeb272cfef27ecb5f6375a7 100644 --- a/crates/remote_server/src/headless_project.rs +++ b/crates/remote_server/src/headless_project.rs @@ -17,7 +17,7 @@ use project::{ debugger::{breakpoint_store::BreakpointStore, dap_store::DapStore}, git_store::GitStore, image_store::ImageId, - lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind}, + lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind, LogKind}, project_settings::SettingsObserver, search::SearchQuery, task_store::TaskStore, @@ -623,26 +623,28 @@ impl HeadlessProject { async fn handle_toggle_lsp_logs( _: Entity, envelope: TypedEnvelope, - mut cx: AsyncApp, + cx: AsyncApp, ) -> Result<()> { let server_id = LanguageServerId::from_proto(envelope.payload.server_id); - let lsp_logs = cx - .update(|cx| { - cx.try_global::() - .map(|lsp_logs| lsp_logs.0.clone()) - })? - .context("lsp logs store is missing")?; - - lsp_logs.update(&mut cx, |lsp_logs, _| { - // RPC logs are very noisy and we need to toggle it on the headless server too. - // The rest of the logs for the ssh project are very important to have toggled always, - // to e.g. send language server error logs to the client before anything is toggled. 
- if envelope.payload.enabled { - lsp_logs.enable_rpc_trace_for_language_server(server_id); - } else { - lsp_logs.disable_rpc_trace_for_language_server(server_id); - } - })?; + cx.update(|cx| { + let log_store = cx + .try_global::() + .map(|global_log_store| global_log_store.0.clone()) + .context("lsp logs store is missing")?; + let toggled_log_kind = + match proto::toggle_lsp_logs::LogType::from_i32(envelope.payload.log_type) + .context("invalid log type")? + { + proto::toggle_lsp_logs::LogType::Log => LogKind::Logs, + proto::toggle_lsp_logs::LogType::Trace => LogKind::Trace, + proto::toggle_lsp_logs::LogType::Rpc => LogKind::Rpc, + }; + log_store.update(cx, |log_store, _| { + log_store.toggle_lsp_logs(server_id, envelope.payload.enabled, toggled_log_kind); + }); + anyhow::Ok(()) + })??; + Ok(()) } From c0e85481b09da81c852a7fc3f793a82fe125fe97 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 25 Nov 2025 10:11:43 +0100 Subject: [PATCH 015/749] lsp: Fix potential double didClose notification when renaming a file (#43448) Closes #42709 Release Notes: - N/A --- crates/project/src/lsp_store.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 349bfa9ed00223ea71d4d77dd32bdf433c39c784..f5d931737dff9a873fc5d63e5445b2b5d49bab56 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -2684,10 +2684,15 @@ impl LocalLspStore { cx: &mut App, ) { buffer.update(cx, |buffer, cx| { - let _ = self.buffer_snapshots.remove(&buffer.remote_id()); + let mut snapshots = self.buffer_snapshots.remove(&buffer.remote_id()); for (_, language_server) in self.language_servers_for_buffer(buffer, cx) { - language_server.unregister_buffer(file_url.clone()); + if snapshots + .as_mut() + .is_some_and(|map| map.remove(&language_server.server_id()).is_some()) + { + language_server.unregister_buffer(file_url.clone()); + } } 
}); } From 5139cc2bfb6380ef0520727f2da57771018529e9 Mon Sep 17 00:00:00 2001 From: AidanV <84053180+AidanV@users.noreply.github.com> Date: Tue, 25 Nov 2025 01:20:01 -0800 Subject: [PATCH 016/749] helix: Fix `Vim::NextWordEnd` off-by-one in `HelixSelect` (#43234) Closes #43209 Closes #38121 Starting on the first character. Running `v e` before changes: image Running `v e` after changes: image Change Notes: - Added helix selection sanitation code that directly mirrors the code in the Vim [`visual_motion`](https://github.com/AidanV/zed/blob/b6728c080c5d14ded7002d0276deb5c19d42ed8a/crates/vim/src/visual.rs#L237) method. I kept the comments from the Vim section that explains its purpose. - The above change converted the problem from fixing `v e` to fixing `v w`. Since `w` is treated differently in Helix than in Vim (i.e. `w` in Vim goes to the first character of a word and `w` in Helix goes to the character before a word. Commented [here](https://github.com/AidanV/zed/blob/b6728c080c5d14ded7002d0276deb5c19d42ed8a/crates/vim/src/helix.rs#L132)), the code treats `w` in `HelixSelect` as a motion that differs from the Vim motion in the same way that the function [`helix_move_cursor`](https://github.com/AidanV/zed/blob/b6728c080c5d14ded7002d0276deb5c19d42ed8a/crates/vim/src/helix.rs#L353) separates these behaviors. - Added a regression test Release Notes: - Fixes bug where `Vim::NextWordEnd` in `HelixSelect` would not select whole word. 
--- crates/vim/src/helix.rs | 185 ++++++++++++++++++++++++++++++---------- 1 file changed, 139 insertions(+), 46 deletions(-) diff --git a/crates/vim/src/helix.rs b/crates/vim/src/helix.rs index 67c99ff6aea249692bddc38d3681be5c491a7437..fae2bda578c6844c33290d059248b895ebde4c3d 100644 --- a/crates/vim/src/helix.rs +++ b/crates/vim/src/helix.rs @@ -109,19 +109,76 @@ impl Vim { }; s.move_with(|map, selection| { - let current_head = selection.head(); - - let Some((new_head, goal)) = motion.move_point( - map, - current_head, - selection.goal, - times, - &text_layout_details, - ) else { - return; + let was_reversed = selection.reversed; + let mut current_head = selection.head(); + + // our motions assume the current character is after the cursor, + // but in (forward) visual mode the current character is just + // before the end of the selection. + + // If the file ends with a newline (which is common) we don't do this. + // so that if you go to the end of such a file you can use "up" to go + // to the previous line and have it work somewhat as expected. 
+ if !selection.reversed + && !selection.is_empty() + && !(selection.end.column() == 0 && selection.end == map.max_point()) + { + current_head = movement::left(map, selection.end) + } + + let (new_head, goal) = match motion { + // Going to next word start is special cased + // since Vim differs from Helix in that motion + // Vim: `w` goes to the first character of a word + // Helix: `w` goes to the character before a word + Motion::NextWordStart { ignore_punctuation } => { + let mut head = movement::right(map, current_head); + let classifier = + map.buffer_snapshot().char_classifier_at(head.to_point(map)); + for _ in 0..times.unwrap_or(1) { + let (_, new_head) = + movement::find_boundary_trail(map, head, |left, right| { + Self::is_boundary_right(ignore_punctuation)( + left, + right, + &classifier, + ) + }); + head = new_head; + } + head = movement::left(map, head); + (head, SelectionGoal::None) + } + _ => motion + .move_point( + map, + current_head, + selection.goal, + times, + &text_layout_details, + ) + .unwrap_or((current_head, selection.goal)), }; selection.set_head(new_head, goal); + + // ensure the current character is included in the selection. + if !selection.reversed { + let next_point = movement::right(map, selection.end); + + if !(next_point.column() == 0 && next_point == map.max_point()) { + selection.end = next_point; + } + } + + // vim always ensures the anchor character stays selected. + // if our selection has reversed, we need to move the opposite end + // to ensure the anchor is still selected. 
+ if was_reversed && !selection.reversed { + selection.start = movement::left(map, selection.start); + } else if !was_reversed && selection.reversed { + selection.end = movement::right(map, selection.end); + } }) }); }); @@ -255,6 +312,30 @@ impl Vim { }); } + fn is_boundary_right( + ignore_punctuation: bool, + ) -> impl FnMut(char, char, &CharClassifier) -> bool { + move |left, right, classifier| { + let left_kind = classifier.kind_with(left, ignore_punctuation); + let right_kind = classifier.kind_with(right, ignore_punctuation); + let at_newline = (left == '\n') ^ (right == '\n'); + + (left_kind != right_kind && right_kind != CharKind::Whitespace) || at_newline + } + } + + fn is_boundary_left( + ignore_punctuation: bool, + ) -> impl FnMut(char, char, &CharClassifier) -> bool { + move |left, right, classifier| { + let left_kind = classifier.kind_with(left, ignore_punctuation); + let right_kind = classifier.kind_with(right, ignore_punctuation); + let at_newline = (left == '\n') ^ (right == '\n'); + + (left_kind != right_kind && left_kind != CharKind::Whitespace) || at_newline + } + } + pub fn helix_move_cursor( &mut self, motion: Motion, @@ -263,6 +344,30 @@ impl Vim { cx: &mut Context, ) { match motion { + Motion::NextWordStart { ignore_punctuation } => self.helix_find_range_forward( + times, + window, + cx, + Self::is_boundary_right(ignore_punctuation), + ), + Motion::NextWordEnd { ignore_punctuation } => self.helix_find_range_forward( + times, + window, + cx, + Self::is_boundary_left(ignore_punctuation), + ), + Motion::PreviousWordStart { ignore_punctuation } => self.helix_find_range_backward( + times, + window, + cx, + Self::is_boundary_left(ignore_punctuation), + ), + Motion::PreviousWordEnd { ignore_punctuation } => self.helix_find_range_backward( + times, + window, + cx, + Self::is_boundary_right(ignore_punctuation), + ), Motion::EndOfLine { .. } => { // In Helix mode, EndOfLine should position cursor ON the last character, // not after it. 
We therefore need special handling for it. @@ -288,42 +393,6 @@ impl Vim { }); }); } - Motion::NextWordStart { ignore_punctuation } => { - self.helix_find_range_forward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && right_kind != CharKind::Whitespace) || at_newline - }) - } - Motion::NextWordEnd { ignore_punctuation } => { - self.helix_find_range_forward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && left_kind != CharKind::Whitespace) || at_newline - }) - } - Motion::PreviousWordStart { ignore_punctuation } => { - self.helix_find_range_backward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && left_kind != CharKind::Whitespace) || at_newline - }) - } - Motion::PreviousWordEnd { ignore_punctuation } => { - self.helix_find_range_backward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && right_kind != CharKind::Whitespace) || at_newline - }) - } Motion::FindForward { before, char, @@ -1394,6 +1463,30 @@ mod test { cx.assert_state("«one ˇ»two", Mode::HelixNormal); } + #[gpui::test] + async fn test_helix_select_motion(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + 
cx.enable_helix(); + + cx.set_state("«ˇ»one two three", Mode::HelixSelect); + cx.simulate_keystrokes("w"); + cx.assert_state("«one ˇ»two three", Mode::HelixSelect); + + cx.set_state("«ˇ»one two three", Mode::HelixSelect); + cx.simulate_keystrokes("e"); + cx.assert_state("«oneˇ» two three", Mode::HelixSelect); + } + + #[gpui::test] + async fn test_helix_full_cursor_selection(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + cx.enable_helix(); + + cx.set_state("ˇone two three", Mode::HelixNormal); + cx.simulate_keystrokes("l l v h h h"); + cx.assert_state("«ˇone» two three", Mode::HelixSelect); + } + #[gpui::test] async fn test_helix_select_regex(cx: &mut gpui::TestAppContext) { let mut cx = VimTestContext::new(cx, true).await; From 7651854bbd892e2938673b8a5eed0d7f53278103 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 25 Nov 2025 10:22:00 +0100 Subject: [PATCH 017/749] ci: Do not show output of failed tests at the end too (#43449) This reverts #39643, effectively For the record, @SomeoneToIgnore found it quite cumbersome to scroll through logs just to see which tests have failed. I kinda see the argument. At the same time, I wish nextest could do both: it could aggregate logs of failed tests and then print out the summary. 
Release Notes: - N/A --- .github/workflows/extension_tests.yml | 2 +- .github/workflows/release.yml | 6 +++--- .github/workflows/release_nightly.yml | 2 +- .github/workflows/run_tests.yml | 6 +++--- tooling/xtask/src/tasks/workflows/steps.rs | 5 +---- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.github/workflows/extension_tests.yml b/.github/workflows/extension_tests.yml index 89289fbea20999ada413ef1801bb428f03c82c6b..e579c6739dd3201d37b8029fcbc205f28f9bafd9 100644 --- a/.github/workflows/extension_tests.yml +++ b/.github/workflows/extension_tests.yml @@ -77,7 +77,7 @@ jobs: uses: taiki-e/install-action@nextest - name: steps::cargo_nextest if: inputs.run_tests - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} timeout-minutes: 3 check_extension: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 90d105880f94ee428f01746ed627f5c6f7d4e246..7afac285b5a34df2aadd04952400809059e12222 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,7 +33,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 300 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() @@ -80,7 +80,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() @@ -112,7 +112,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh - name: steps::cargo_nextest - run: cargo nextest run --workspace 
--no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: pwsh - name: steps::cleanup_cargo_config if: always() diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml index bb327f2c5527d353c9aad01c3e26edcf5baac78c..d76244175accc3e816cbd7d5dc322d2529a0a236 100644 --- a/.github/workflows/release_nightly.yml +++ b/.github/workflows/release_nightly.yml @@ -51,7 +51,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: pwsh - name: steps::cleanup_cargo_config if: always() diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 323dd7fd1b52eb43400658470ee7d7c986f219fa..8ed11788d44317d93899f629956567228fbb55fe 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -117,7 +117,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: pwsh - name: steps::cleanup_cargo_config if: always() @@ -166,7 +166,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() @@ -200,7 +200,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 300 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: 
steps::cleanup_cargo_config if: always() diff --git a/tooling/xtask/src/tasks/workflows/steps.rs b/tooling/xtask/src/tasks/workflows/steps.rs index c5edbdf8439675b4264388001322c12f4f3026e9..910b344cb7319e4f58911b3025632e560553716a 100644 --- a/tooling/xtask/src/tasks/workflows/steps.rs +++ b/tooling/xtask/src/tasks/workflows/steps.rs @@ -53,10 +53,7 @@ pub fn cargo_install_nextest() -> Step { } pub fn cargo_nextest(platform: Platform) -> Step { - named::run( - platform, - "cargo nextest run --workspace --no-fail-fast --failure-output immediate-final", - ) + named::run(platform, "cargo nextest run --workspace --no-fail-fast") } pub fn setup_cargo_config(platform: Platform) -> Step { From a359a5a1f248616f4bece8d082fab0c6fe60cc9c Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Tue, 25 Nov 2025 13:49:27 +0100 Subject: [PATCH 018/749] Add performance doc (#43265) Release Notes: - N/A --- docs/src/SUMMARY.md | 1 + docs/src/performance.md | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 docs/src/performance.md diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index dc42cfbdbb89d06162016f8ec2548ad630d20bc9..7425e77fb42af3922ca50fbb8fae7cd8f75d9313 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -167,6 +167,7 @@ - [FreeBSD](./development/freebsd.md) - [Local Collaboration](./development/local-collaboration.md) - [Using Debuggers](./development/debuggers.md) + - [Performance](./performance.md) - [Glossary](./development/glossary.md) - [Release Notes](./development/release-notes.md) - [Debugging Crashes](./development/debugging-crashes.md) diff --git a/docs/src/performance.md b/docs/src/performance.md new file mode 100644 index 0000000000000000000000000000000000000000..bcd9da1a59533469758d309cb19b8dec30bab012 --- /dev/null +++ b/docs/src/performance.md @@ -0,0 +1,32 @@ +How to use our internal tools to profile and keep Zed fast. 
+ +# Tracy + +Get a profile of the zed foreground executor + +The profiler always runs in the background. You can save a trace from its UI or look at the results live. + +## Setup/Building the importer: + +- Clone the repo at git@github.com:zed-industries/tracy.git on v0.12.2 branch +- `cd profiler && mkdir build && cd build` +- Run cmake to generate build files: `cmake -G Ninja -DCMAKE_BUILD_TYPE=Release ..` +- Build the importer: `ninja` +- Run the impoter on the trace file: `./tracy-import-miniprofiler /path/to/trace.miniprof /path/to/output.tracy` +- Open the trace in tracy: + - If you're on windows download the v0.12.2 version from the releases on the upstream repo + - If you're on other platforms open it on the website: https://tracy.nereid.pl/ (the version might mismatch so your luck might vary, we need to host our own ideally..) + +## To Save a Trace: + +- Run the action: `zed open performance profiler` +- Hit the save button. This opens a save dialog or if that fails to open the trace gets saved in your working directory. +- Convert the profile so it can be imported in tracy using the importer: `./tracy-import-miniprofiler output.tracy` +- Go to hit the 'power button' in the top left and then open saved trace. +- Now zoom in to see the tasks and how long they took + +# Warn if function is slow + +```rust +let _timer = zlog::time!("my_function_name").warn_if_gt(std::time::Duration::from_millis(100)); +``` From f8965317c394b758106bd83aebb94ddaea0ef00e Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 25 Nov 2025 14:41:19 +0100 Subject: [PATCH 019/749] multi_buffer: Fix up some anchor checks (#43454) Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- Cargo.lock | 1 + crates/agent_ui/src/inline_assistant.rs | 1 + crates/assistant_text_thread/Cargo.toml | 1 + .../src/assistant_text_thread_tests.rs | 7 ++- .../assistant_text_thread/src/text_thread.rs | 12 +++-- crates/buffer_diff/src/buffer_diff.rs | 1 + crates/editor/src/selections_collection.rs | 23 ++++++++++ crates/multi_buffer/src/anchor.rs | 44 +++++++++++++++---- crates/multi_buffer/src/multi_buffer.rs | 43 +++++++++++++----- crates/multi_buffer/src/multi_buffer_tests.rs | 2 + crates/multi_buffer/src/path_key.rs | 13 ++++-- crates/remote/src/transport/wsl.rs | 25 +++++------ crates/rope/src/rope.rs | 6 +-- crates/text/src/anchor.rs | 20 ++++++++- crates/text/src/text.rs | 13 +++--- 15 files changed, 154 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93961b4181aa1ad721ba8d740736d86c2ae32ca2..2698d882403b159f8ed350c59cc8e98ab467360d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -884,6 +884,7 @@ dependencies = [ "fuzzy", "gpui", "indoc", + "itertools 0.14.0", "language", "language_model", "log", diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index f822c79f2589c757173bcd2699ef6abf2ac51027..81242135757561a6c829cc9cabf8893294d9e875 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -1445,6 +1445,7 @@ impl InlineAssistant { multi_buffer.update(cx, |multi_buffer, cx| { multi_buffer.push_excerpts( old_buffer.clone(), + // todo(lw): buffer_start and buffer_end might come from different snapshots! 
Some(ExcerptRange::new(buffer_start..buffer_end)), cx, ); diff --git a/crates/assistant_text_thread/Cargo.toml b/crates/assistant_text_thread/Cargo.toml index 8dfdfa3828340217456088a246eee5b1568a7a77..7c8fcca3bfa81f6f2de570fa68ecc795cb81b257 100644 --- a/crates/assistant_text_thread/Cargo.toml +++ b/crates/assistant_text_thread/Cargo.toml @@ -29,6 +29,7 @@ fs.workspace = true futures.workspace = true fuzzy.workspace = true gpui.workspace = true +itertools.workspace = true language.workspace = true language_model.workspace = true log.workspace = true diff --git a/crates/assistant_text_thread/src/assistant_text_thread_tests.rs b/crates/assistant_text_thread/src/assistant_text_thread_tests.rs index 75a414dfc4428b3c101a72454bb185b5a171d692..0743641bf5ce33850f28987d834b2e79771cff6f 100644 --- a/crates/assistant_text_thread/src/assistant_text_thread_tests.rs +++ b/crates/assistant_text_thread/src/assistant_text_thread_tests.rs @@ -880,10 +880,9 @@ async fn test_random_context_collaboration(cx: &mut TestAppContext, mut rng: Std let num_sections = rng.random_range(0..=3); let mut section_start = 0; for _ in 0..num_sections { - let mut section_end = rng.random_range(section_start..=output_text.len()); - while !output_text.is_char_boundary(section_end) { - section_end += 1; - } + let section_end = output_text.floor_char_boundary( + rng.random_range(section_start..=output_text.len()), + ); events.push(Ok(SlashCommandEvent::StartSection { icon: IconName::Ai, label: "section".into(), diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index a50e410ab7d1bd1eb34ba367dfbfd36a7b2ec826..2bc4ceec4c243a654abf04b19b4e2ba93a1fef4f 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -16,6 +16,7 @@ use gpui::{ App, AppContext as _, Context, Entity, EventEmitter, RenderImage, SharedString, Subscription, Task, }; +use itertools::Itertools as _; use 
language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset}; use language_model::{ LanguageModel, LanguageModelCacheConfiguration, LanguageModelCompletionEvent, @@ -1853,14 +1854,17 @@ impl TextThread { } if ensure_trailing_newline - && buffer.contains_str_at(command_range_end, "\n") + && buffer + .chars_at(command_range_end) + .next() + .is_some_and(|c| c == '\n') { - let newline_offset = insert_position.saturating_sub(1); - if buffer.contains_str_at(newline_offset, "\n") + if let Some((prev_char, '\n')) = + buffer.reversed_chars_at(insert_position).next_tuple() && last_section_range.is_none_or(|last_section_range| { !last_section_range .to_offset(buffer) - .contains(&newline_offset) + .contains(&(insert_position - prev_char.len_utf8())) }) { deletions.push((command_range_end..command_range_end + 1, "")); diff --git a/crates/buffer_diff/src/buffer_diff.rs b/crates/buffer_diff/src/buffer_diff.rs index d6ae5545200bb47976554814e346be3039fa276e..52c6463b9bcccd242ef18e5f3dcb518bd335686d 100644 --- a/crates/buffer_diff/src/buffer_diff.rs +++ b/crates/buffer_diff/src/buffer_diff.rs @@ -147,6 +147,7 @@ impl std::fmt::Debug for BufferDiffInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("BufferDiffSnapshot") .field("hunks", &self.hunks) + .field("remote_id", &self.base_text.remote_id()) .finish() } } diff --git a/crates/editor/src/selections_collection.rs b/crates/editor/src/selections_collection.rs index c1b8d11db94de7394b36ec706f42622993b63785..f8ff9da763403b0946e99a4e39c934ff43ad6634 100644 --- a/crates/editor/src/selections_collection.rs +++ b/crates/editor/src/selections_collection.rs @@ -415,6 +415,29 @@ impl SelectionsCollection { !mutable_collection.disjoint.is_empty() || mutable_collection.pending.is_some(), "There must be at least one selection" ); + if cfg!(debug_assertions) { + mutable_collection.disjoint.iter().for_each(|selection| { + assert!( + snapshot.can_resolve(&selection.start), + 
"disjoint selection start is not resolvable for the given snapshot:\n{selection:?}", + ); + assert!( + snapshot.can_resolve(&selection.end), + "disjoint selection end is not resolvable for the given snapshot: {selection:?}", + ); + }); + if let Some(pending) = &mutable_collection.pending { + let selection = &pending.selection; + assert!( + snapshot.can_resolve(&selection.start), + "pending selection start is not resolvable for the given snapshot: {pending:?}", + ); + assert!( + snapshot.can_resolve(&selection.end), + "pending selection end is not resolvable for the given snapshot: {pending:?}", + ); + } + } (mutable_collection.selections_changed, result) } diff --git a/crates/multi_buffer/src/anchor.rs b/crates/multi_buffer/src/anchor.rs index 57b5244b3f276265c31f1431701a2bd7d8e59aef..b8c1680574a86354d92f39c544c202642293f619 100644 --- a/crates/multi_buffer/src/anchor.rs +++ b/crates/multi_buffer/src/anchor.rs @@ -9,14 +9,33 @@ use std::{ use sum_tree::Bias; use text::BufferId; -#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] pub struct Anchor { + /// Invariant: If buffer id is `None`, excerpt id must be `ExcerptId::min()` or `ExcerptId::max()`. 
pub buffer_id: Option, pub excerpt_id: ExcerptId, pub text_anchor: text::Anchor, pub diff_base_anchor: Option, } +impl std::fmt::Debug for Anchor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if *self == Self::min() { + return f.write_str("Anchor::MIN"); + } + if *self == Self::max() { + return f.write_str("Anchor::MAX"); + } + + f.debug_struct("Anchor") + .field("buffer_id", &self.buffer_id) + .field("excerpt_id", &self.excerpt_id) + .field("text_anchor", &self.text_anchor) + .field("diff_base_anchor", &self.diff_base_anchor) + .finish() + } +} + impl Anchor { pub fn with_diff_base_anchor(self, diff_base_anchor: text::Anchor) -> Self { Self { @@ -30,6 +49,10 @@ impl Anchor { buffer_id: BufferId, text_anchor: text::Anchor, ) -> Self { + debug_assert!( + text_anchor.buffer_id.is_none_or(|id| id == buffer_id), + "buffer id does not match the one in the text anchor: {buffer_id:?} {text_anchor:?}", + ); Self { buffer_id: Some(buffer_id), excerpt_id, @@ -77,7 +100,12 @@ impl Anchor { if excerpt_id_cmp.is_ne() { return excerpt_id_cmp; } - if self_excerpt_id == ExcerptId::min() || self_excerpt_id == ExcerptId::max() { + if self_excerpt_id == ExcerptId::max() + && self.text_anchor == text::Anchor::MAX + && self.text_anchor == text::Anchor::MAX + && self.diff_base_anchor.is_none() + && other.diff_base_anchor.is_none() + { return Ordering::Equal; } if let Some(excerpt) = snapshot.excerpt(self_excerpt_id) { @@ -119,8 +147,8 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: self.buffer_id, - excerpt_id: self.excerpt_id, + buffer_id: Some(excerpt.buffer_id), + excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_left(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { if let Some(base_text) = snapshot @@ -143,8 +171,8 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: self.buffer_id, - excerpt_id: self.excerpt_id, + 
buffer_id: Some(excerpt.buffer_id), + excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_right(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { if let Some(base_text) = snapshot @@ -174,8 +202,8 @@ impl Anchor { } pub fn is_valid(&self, snapshot: &MultiBufferSnapshot) -> bool { - if *self == Anchor::min() || *self == Anchor::max() { - true + if *self == Anchor::min() || self.excerpt_id == ExcerptId::max() { + !snapshot.is_empty() } else if let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { (self.text_anchor == excerpt.range.context.start || self.text_anchor == excerpt.range.context.end diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 93fa26e02936884bc4b9dfd19bdea37455f1fd6e..7922692d30eb3a79e835f5e4b94313c3ea886a7c 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -5076,8 +5076,7 @@ impl MultiBufferSnapshot { excerpt_id: ExcerptId, text_anchor: Range, ) -> Option> { - let excerpt_id = self.latest_excerpt_id(excerpt_id); - let excerpt = self.excerpt(excerpt_id)?; + let excerpt = self.excerpt(self.latest_excerpt_id(excerpt_id))?; Some( self.anchor_in_excerpt_(excerpt, text_anchor.start)? 
@@ -5092,8 +5091,7 @@ impl MultiBufferSnapshot { excerpt_id: ExcerptId, text_anchor: text::Anchor, ) -> Option { - let excerpt_id = self.latest_excerpt_id(excerpt_id); - let excerpt = self.excerpt(excerpt_id)?; + let excerpt = self.excerpt(self.latest_excerpt_id(excerpt_id))?; self.anchor_in_excerpt_(excerpt, text_anchor) } @@ -5130,7 +5128,8 @@ impl MultiBufferSnapshot { } pub fn can_resolve(&self, anchor: &Anchor) -> bool { - if anchor.excerpt_id == ExcerptId::min() || anchor.excerpt_id == ExcerptId::max() { + if *anchor == Anchor::min() || anchor.excerpt_id == ExcerptId::max() { + // todo(lw): should be `!self.is_empty()` true } else if let Some(excerpt) = self.excerpt(anchor.excerpt_id) { excerpt.buffer.can_resolve(&anchor.text_anchor) @@ -5791,8 +5790,8 @@ impl MultiBufferSnapshot { .and_then(|(buffer, _)| buffer.file()) } - pub fn language_at(&self, point: T) -> Option<&Arc> { - self.point_to_buffer_offset(point) + pub fn language_at(&self, offset: T) -> Option<&Arc> { + self.point_to_buffer_offset(offset) .and_then(|(buffer, offset)| buffer.language_at(offset)) } @@ -5992,13 +5991,27 @@ impl MultiBufferSnapshot { theme: Option<&SyntaxTheme>, ) -> Option<(BufferId, Vec>)> { let anchor = self.anchor_before(offset); - let excerpt_id = anchor.excerpt_id; - let excerpt = self.excerpt(excerpt_id)?; - let buffer_id = excerpt.buffer_id; + let excerpt @ &Excerpt { + id: excerpt_id, + buffer_id, + ref buffer, + .. + } = self.excerpt(anchor.excerpt_id)?; + if cfg!(debug_assertions) { + match anchor.buffer_id { + // we clearly are hitting this according to sentry, but in what situations can this occur? 
+ Some(anchor_buffer_id) => { + assert_eq!( + anchor_buffer_id, buffer_id, + "anchor {anchor:?} does not match with resolved excerpt {excerpt:?}" + ) + } + None => assert_eq!(anchor, Anchor::max()), + } + }; Some(( buffer_id, - excerpt - .buffer + buffer .symbols_containing(anchor.text_anchor, theme) .into_iter() .flat_map(|item| { @@ -6114,6 +6127,12 @@ impl MultiBufferSnapshot { } } + /// Returns the excerpt for the given id. The returned excerpt is guaranteed + /// to have the same excerpt id as the one passed in, with the exception of + /// `ExcerptId::max()`. + /// + /// Callers of this function should generally use the resulting excerpt's `id` field + /// afterwards. fn excerpt(&self, excerpt_id: ExcerptId) -> Option<&Excerpt> { let mut cursor = self.excerpts.cursor::>(()); let locator = self.excerpt_locator_for_id(excerpt_id); diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 526c77db85a3efabb0e64b184dbed6fa90097558..9517f1f76ece2f34aa5c95eb27b408e1ef004b99 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -3050,7 +3050,9 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { for _ in 0..10 { let end_ix = rng.random_range(0..=text_rope.len()); + let end_ix = text_rope.floor_char_boundary(end_ix); let start_ix = rng.random_range(0..=end_ix); + let start_ix = text_rope.floor_char_boundary(start_ix); assert_eq!( snapshot .bytes_in_range(MultiBufferOffset(start_ix)..MultiBufferOffset(end_ix)) diff --git a/crates/multi_buffer/src/path_key.rs b/crates/multi_buffer/src/path_key.rs index 926ceff202837d13fe14350ee0334cbf4036bd89..530bb4aa6435fb9a3aa768d84a2bbcf829eb72c6 100644 --- a/crates/multi_buffer/src/path_key.rs +++ b/crates/multi_buffer/src/path_key.rs @@ -57,7 +57,7 @@ impl MultiBuffer { let snapshot = self.read(cx); let excerpt = snapshot.excerpt(*excerpt_id)?; Some(Anchor::in_buffer( - *excerpt_id, + 
excerpt.id, excerpt.buffer_id, excerpt.range.context.start, )) @@ -182,11 +182,16 @@ impl MultiBuffer { }; let ids_to_expand = HashSet::from_iter(ids); + let mut excerpt_id_ = None; let expanded_ranges = excerpt_ids.iter().filter_map(|excerpt_id| { let excerpt = snapshot.excerpt(*excerpt_id)?; + let excerpt_id = excerpt.id; + if excerpt_id_.is_none() { + excerpt_id_ = Some(excerpt_id); + } let mut context = excerpt.range.context.to_point(&excerpt.buffer); - if ids_to_expand.contains(excerpt_id) { + if ids_to_expand.contains(&excerpt_id) { match direction { ExpandExcerptDirection::Up => { context.start.row = context.start.row.saturating_sub(line_count); @@ -222,10 +227,10 @@ impl MultiBuffer { } merged_ranges.push(range) } - let Some(excerpt_id) = excerpt_ids.first() else { + let Some(excerpt_id) = excerpt_id_ else { continue; }; - let Some(buffer_id) = &snapshot.buffer_id_for_excerpt(*excerpt_id) else { + let Some(buffer_id) = &snapshot.buffer_id_for_excerpt(excerpt_id) else { continue; }; diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index 9fdf14d9fed6e6caf108171e292d4c2f33709ce7..3239f8813159a42a95c607a1b893845d4d5ae3c8 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -141,10 +141,6 @@ impl WslRemoteConnection { windows_path_to_wsl_path_impl(&self.connection_options, source, self.can_exec).await } - fn wsl_command(&self, program: &str, args: &[impl AsRef]) -> process::Command { - wsl_command_impl(&self.connection_options, program, args, self.can_exec) - } - async fn run_wsl_command(&self, program: &str, args: &[&str]) -> Result { run_wsl_command_impl(&self.connection_options, program, args, self.can_exec).await } @@ -345,16 +341,17 @@ impl RemoteConnection for WslRemoteConnection { if reconnect { proxy_args.push("--reconnect".to_owned()); } - let proxy_process = match self - .wsl_command("env", &proxy_args) - .kill_on_drop(true) - .spawn() - { - Ok(process) => process, - Err(error) 
=> { - return Task::ready(Err(anyhow!("failed to spawn remote server: {}", error))); - } - }; + + let proxy_process = + match wsl_command_impl(&self.connection_options, "env", &proxy_args, self.can_exec) + .kill_on_drop(true) + .spawn() + { + Ok(process) => process, + Err(error) => { + return Task::ready(Err(anyhow!("failed to spawn remote server: {}", error))); + } + }; super::handle_rpc_messages_over_child_process_stdio( proxy_process, diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index ad39022c0d6181bd5d5f4fdfc1b84ea4a667340d..32894fb84469287fb1474efc57d8180bdee13466 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -715,10 +715,8 @@ impl<'a> Chunks<'a> { range.start }; let chunk_offset = offset - chunks.start(); - if let Some(chunk) = chunks.item() - && !chunk.text.is_char_boundary(chunk_offset) - { - panic!("byte index {} is not a char boundary", offset); + if let Some(chunk) = chunks.item() { + chunk.assert_char_boundary(chunk_offset); } Self { chunks, diff --git a/crates/text/src/anchor.rs b/crates/text/src/anchor.rs index cf2febdfc505b426fd8d224a2dc29f18d22cd1a8..63a9ff6f1863041594fba7ebea0b3feaba6b8db7 100644 --- a/crates/text/src/anchor.rs +++ b/crates/text/src/anchor.rs @@ -6,7 +6,7 @@ use std::{cmp::Ordering, fmt::Debug, ops::Range}; use sum_tree::{Bias, Dimensions}; /// A timestamped position in a buffer -#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] +#[derive(Copy, Clone, Eq, PartialEq, Hash)] pub struct Anchor { pub timestamp: clock::Lamport, /// The byte offset in the buffer @@ -16,6 +16,24 @@ pub struct Anchor { pub buffer_id: Option, } +impl Debug for Anchor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if *self == Self::MIN { + return f.write_str("Anchor::MIN"); + } + if *self == Self::MAX { + return f.write_str("Anchor::MAX"); + } + + f.debug_struct("Anchor") + .field("timestamp", &self.timestamp) + .field("offset", &self.offset) + .field("bias", &self.bias) + 
.field("buffer_id", &self.buffer_id) + .finish() + } +} + impl Anchor { pub const MIN: Self = Self { timestamp: clock::Lamport::MIN, diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index fe9fe26f1bcc89b66753703e03f0a8bfeec628bd..5f87e5441d2bb97863b0086ac273e4d4d8acfdc9 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -2444,7 +2444,9 @@ impl BufferSnapshot { } else if bias == Bias::Right && offset == self.len() { Anchor::MAX } else { - if offset > self.visible_text.len() { + if cfg!(debug_assertions) { + self.visible_text.assert_char_boundary(offset); + } else if offset > self.visible_text.len() { panic!("offset {} is out of bounds", offset) } let (start, _, item) = self.fragments.find::(&None, &offset, bias); @@ -3137,12 +3139,9 @@ impl ToOffset for Point { impl ToOffset for usize { fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { - assert!( - *self <= snapshot.len(), - "offset {} is out of range, snapshot length is {}", - self, - snapshot.len() - ); + if cfg!(debug_assertions) { + snapshot.as_rope().assert_char_boundary(*self); + } *self } } From 1cbb49864c32feae332c082f7f10aa0ab17750bc Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Tue, 25 Nov 2025 16:01:38 +0100 Subject: [PATCH 020/749] document how to do flamecharts in an easy way (#43461) Release Notes: - N/A --- docs/src/performance.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/src/performance.md b/docs/src/performance.md index bcd9da1a59533469758d309cb19b8dec30bab012..9dff1d7f5ff0961d33169ee5c8761016d8fb7564 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -1,10 +1,19 @@ How to use our internal tools to profile and keep Zed fast. -# Tracy +# Flamechart/CPU profiling -Get a profile of the zed foreground executor +See what the CPU spends the most time on. Strongly recommend you use +[samply](https://github.com/mstange/samply). It opens an interactive profile in +the browser. 
See its README on how to install and run. -The profiler always runs in the background. You can save a trace from its UI or look at the results live. +# Task/Async profiling + +Get a profile of the zed foreground executor and background executors. Check if +anything is blocking the foreground too long or taking too much (clock) time in +the background. + +The profiler always runs in the background. You can save a trace from its UI or +look at the results live. ## Setup/Building the importer: From f58de2106885b121aaae6473d8e3511dcb65eabc Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Tue, 25 Nov 2025 23:08:49 +0800 Subject: [PATCH 021/749] miniprofiler_ui: Improve MiniProfiler to use uniform list (#43457) Release Notes: - N/A --- - Apply uniform_list for timing list for performance. - Add paddings for window. - Add space to `ms`, before: `100ms` after `100 ms`. ## Before image ## After image --- crates/gpui/src/elements/uniform_list.rs | 7 +- crates/miniprofiler_ui/src/miniprofiler_ui.rs | 144 ++++++++++-------- 2 files changed, 84 insertions(+), 67 deletions(-) diff --git a/crates/gpui/src/elements/uniform_list.rs b/crates/gpui/src/elements/uniform_list.rs index ba002a67f3c614e614dd591d795f839e7f1ea73d..72843ea6330aaa24d9e1d6bf34d024cdeb54ad4a 100644 --- a/crates/gpui/src/elements/uniform_list.rs +++ b/crates/gpui/src/elements/uniform_list.rs @@ -11,7 +11,7 @@ use crate::{ StyleRefinement, Styled, Window, point, size, }; use smallvec::SmallVec; -use std::{cell::RefCell, cmp, ops::Range, rc::Rc}; +use std::{cell::RefCell, cmp, ops::Range, rc::Rc, usize}; use super::ListHorizontalSizingBehavior; @@ -235,6 +235,11 @@ impl UniformListScrollHandle { false } } + + /// Scroll to the bottom of the list. 
+ pub fn scroll_to_bottom(&self) { + self.scroll_to_item(usize::MAX, ScrollStrategy::Bottom); + } } impl Styled for UniformList { diff --git a/crates/miniprofiler_ui/src/miniprofiler_ui.rs b/crates/miniprofiler_ui/src/miniprofiler_ui.rs index 5fb80b6307ba3b93b3a9c5def7b8da620fdd738c..93ccfc559c6eedc5e1be1c3ca68355aeba878a76 100644 --- a/crates/miniprofiler_ui/src/miniprofiler_ui.rs +++ b/crates/miniprofiler_ui/src/miniprofiler_ui.rs @@ -1,21 +1,22 @@ use std::{ ops::Range, path::PathBuf, + rc::Rc, time::{Duration, Instant}, }; use gpui::{ - App, AppContext, ClipboardItem, Context, Entity, Hsla, InteractiveElement, IntoElement, - ParentElement, Render, ScrollHandle, SerializedTaskTiming, SharedString, - StatefulInteractiveElement, Styled, Task, TaskTiming, TitlebarOptions, WindowBounds, - WindowHandle, WindowOptions, div, prelude::FluentBuilder, px, relative, size, + App, AppContext, ClipboardItem, Context, Div, Entity, Hsla, InteractiveElement, + ParentElement as _, Render, SerializedTaskTiming, SharedString, StatefulInteractiveElement, + Styled, Task, TaskTiming, TitlebarOptions, UniformListScrollHandle, WindowBounds, WindowHandle, + WindowOptions, div, prelude::FluentBuilder, px, relative, size, uniform_list, }; use util::ResultExt; use workspace::{ Workspace, ui::{ - ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, ToggleState, Tooltip, - WithScrollbar, h_flex, v_flex, + ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, Divider, + ScrollableHandle as _, ToggleState, Tooltip, WithScrollbar, h_flex, v_flex, }, }; use zed_actions::OpenPerformanceProfiler; @@ -95,7 +96,7 @@ pub struct ProfilerWindow { data: DataMode, include_self_timings: ToggleState, autoscroll: bool, - scroll_handle: ScrollHandle, + scroll_handle: UniformListScrollHandle, workspace: Option>, _refresh: Option>, } @@ -111,7 +112,7 @@ impl ProfilerWindow { data: DataMode::Realtime(None), include_self_timings: ToggleState::Unselected, autoscroll: true, - 
scroll_handle: ScrollHandle::new(), + scroll_handle: UniformListScrollHandle::default(), workspace: workspace_handle, _refresh: Some(Self::begin_listen(cx)), }); @@ -128,16 +129,7 @@ impl ProfilerWindow { .get_current_thread_timings(); this.update(cx, |this: &mut ProfilerWindow, cx| { - let scroll_offset = this.scroll_handle.offset(); - let max_offset = this.scroll_handle.max_offset(); - this.autoscroll = -scroll_offset.y >= (max_offset.height - px(5.0)); - this.data = DataMode::Realtime(Some(data)); - - if this.autoscroll { - this.scroll_handle.scroll_to_bottom(); - } - cx.notify(); }) .ok(); @@ -157,12 +149,7 @@ impl ProfilerWindow { } } - fn render_timing( - &self, - value_range: Range, - item: TimingBar, - cx: &App, - ) -> impl IntoElement { + fn render_timing(value_range: Range, item: TimingBar, cx: &App) -> Div { let time_ms = item.end.duration_since(item.start).as_secs_f32() * 1000f32; let remap = value_range @@ -227,10 +214,10 @@ impl ProfilerWindow { ) .child( div() - .min_w(px(60.0)) + .min_w(px(70.)) .flex_shrink_0() .text_right() - .child(format!("{:.1}ms", time_ms)), + .child(format!("{:.1} ms", time_ms)), ) } } @@ -241,15 +228,23 @@ impl Render for ProfilerWindow { window: &mut gpui::Window, cx: &mut gpui::Context, ) -> impl gpui::IntoElement { + let scroll_offset = self.scroll_handle.offset(); + let max_offset = self.scroll_handle.max_offset(); + self.autoscroll = -scroll_offset.y >= (max_offset.height - px(24.)); + if self.autoscroll { + self.scroll_handle.scroll_to_bottom(); + } + v_flex() .id("profiler") .w_full() .h_full() - .gap_2() .bg(cx.theme().colors().surface_background) .text_color(cx.theme().colors().text) .child( h_flex() + .py_2() + .px_4() .w_full() .justify_between() .child( @@ -346,53 +341,70 @@ impl Render for ProfilerWindow { let min = e[0].start; let max = e[e.len() - 1].end.unwrap_or_else(|| Instant::now()); - div.child( + let timings = Rc::new( + e.into_iter() + .filter(|timing| { + timing + .end + .unwrap_or_else(|| 
Instant::now()) + .duration_since(timing.start) + .as_millis() + >= 1 + }) + .filter(|timing| { + if self.include_self_timings.selected() { + true + } else { + !timing.location.file().ends_with("miniprofiler_ui.rs") + } + }) + .cloned() + .collect::>(), + ); + + div.child(Divider::horizontal()).child( v_flex() .id("timings.bars") - .overflow_scroll() .w_full() .h_full() .gap_2() - .track_scroll(&self.scroll_handle) - .on_scroll_wheel(cx.listener(|this, _, _, _cx| { - let scroll_offset = this.scroll_handle.offset(); - let max_offset = this.scroll_handle.max_offset(); - this.autoscroll = -scroll_offset.y >= (max_offset.height - px(5.0)); - })) - .children( - e.iter() - .filter(|timing| { - timing - .end - .unwrap_or_else(|| Instant::now()) - .duration_since(timing.start) - .as_millis() - >= 1 - }) - .filter(|timing| { - if self.include_self_timings.selected() { - true - } else { - !timing.location.file().ends_with("miniprofiler_ui.rs") + .child( + uniform_list("list", timings.len(), { + let timings = timings.clone(); + move |visible_range, _, cx| { + let mut items = vec![]; + for i in visible_range { + let timing = &timings[i]; + let value_range = + max.checked_sub(Duration::from_secs(10)).unwrap_or(min) + ..max; + items.push(Self::render_timing( + value_range, + TimingBar { + location: timing.location, + start: timing.start, + end: timing.end.unwrap_or_else(|| Instant::now()), + color: cx + .theme() + .accents() + .color_for_index(i as u32), + }, + cx, + )); } - }) - .enumerate() - .map(|(i, timing)| { - self.render_timing( - max.checked_sub(Duration::from_secs(10)).unwrap_or(min) - ..max, - TimingBar { - location: timing.location, - start: timing.start, - end: timing.end.unwrap_or_else(|| Instant::now()), - color: cx.theme().accents().color_for_index(i as u32), - }, - cx, - ) - }), - ), + items + } + }) + .p_4() + .on_scroll_wheel(cx.listener(|this, _, _, cx| { + this.autoscroll = false; + cx.notify(); + })) + .track_scroll(self.scroll_handle.clone()) + 
.size_full(), + ) + .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) }) } } From 9cae39449a69a7dae284c98d5bd0b55657b1cfc0 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Tue, 25 Nov 2025 10:53:47 -0500 Subject: [PATCH 022/749] Restructure collaboration docs (#43464) Overview - Channels - Private calls --- Up next would be to - [ ] Update any zed.dev links to point to items in this structure - [ ] Update content in these docs (would prefer to do that in a separate PR from this one) Release Notes: - N/A --- docs/src/SUMMARY.md | 5 +++-- docs/src/authentication.md | 2 +- docs/src/{ => collaboration}/channels.md | 4 +--- docs/src/collaboration/overview.md | 17 +++++++++++++++++ .../private-calls.md} | 8 ++------ docs/src/toolchains.md | 2 +- 6 files changed, 25 insertions(+), 13 deletions(-) rename docs/src/{ => collaboration}/channels.md (92%) create mode 100644 docs/src/collaboration/overview.md rename docs/src/{collaboration.md => collaboration/private-calls.md} (91%) diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 7425e77fb42af3922ca50fbb8fae7cd8f75d9313..2f8bcd2ce8be00790866025d5de687d32aee7dcf 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -34,8 +34,9 @@ - [Command-line Interface](./command-line-interface.md) - [Outline Panel](./outline-panel.md) - [Code Completions](./completions.md) -- [Channels](./channels.md) -- [Collaboration](./collaboration.md) +- [Collaboration](./collaboration/overview.md) + - [Channels](./collaboration/channels.md) + - [Private Calls](./collaboration/private-calls.md) - [Git](./git.md) - [Debugger](./debugger.md) - [Diagnostics](./diagnostics.md) diff --git a/docs/src/authentication.md b/docs/src/authentication.md index 6d05567e3198ed5180b65dc0fb5f470baa679f9e..0ea97040a0ae2023143beb5a83d15cd9e28c9786 100644 --- a/docs/src/authentication.md +++ b/docs/src/authentication.md @@ -4,7 +4,7 @@ Signing in to Zed is not 
required. You can use most features you'd expect in a c ## What Features Require Signing In? -1. All real-time [collaboration features](./collaboration.md). +1. All real-time [collaboration features](./collaboration/overview.md). 2. [LLM-powered features](./ai/overview.md), if you are using Zed as the provider of your LLM models. To use AI without signing in, you can [bring and configure your own API keys](./ai/llm-providers.md#use-your-own-keys). ## Signing In diff --git a/docs/src/channels.md b/docs/src/collaboration/channels.md similarity index 92% rename from docs/src/channels.md rename to docs/src/collaboration/channels.md index afd97cdabc51f8c54ffd3f85d02c7aa0764d2f8b..bc723d73dedf16d2f75179f9203cdbf473bebbbb 100644 --- a/docs/src/channels.md +++ b/docs/src/collaboration/channels.md @@ -1,7 +1,5 @@ # Channels -At Zed we believe that great things are built by great people working together. We have designed Zed to help every individual work faster and to help teams of people work together more effectively. - ## Overview Channels provide a way to streamline collaborating for software engineers in many ways, but particularly: @@ -27,7 +25,7 @@ After joining a channel, you can `Share` a project with the other people there. When you are editing someone else’s project, you still have the full power of the editor at your fingertips, you can jump to definitions, use the AI assistant, and see any diagnostic errors. This is extremely powerful for pairing, as one of you can be implementing the current method while the other is reading and researching the correct solution to the next problem. And, because you have your own config running, it feels like you’re using your own machine. -See [our collaboration documentation](./collaboration.md) for more details about how this works. +See [our collaboration documentation](./private-calls.md) for more details about how this works. 
## Notes diff --git a/docs/src/collaboration/overview.md b/docs/src/collaboration/overview.md new file mode 100644 index 0000000000000000000000000000000000000000..8acbecc372cecee7fb87d40685b3a08eb6e046f6 --- /dev/null +++ b/docs/src/collaboration/overview.md @@ -0,0 +1,17 @@ +# Collaboration + +At Zed, we believe that great things are built by great people working together. +We have designed Zed to help individuals work faster and help teams of people work together more effectively. +Zed has two mechanisms for collaborating: + +1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. +1. [Private Calls](./private-calls.md): Ad-hoc private collaboration with those in your contacts list. + +You will need to [sign in](../authentication.md#signing-in) in order to begin using Zed's collaboration features. + +--- + +> Note: Only collaborate with people that you trust. +> Since sharing a project gives them access to your local file system, you should not share projects with people you do not trust; they could potentially do some nasty things. +> +> In the future, we will do more to prevent this type of access beyond the shared project and add more control over what collaborators can do, but for now, only collaborate with people you trust. diff --git a/docs/src/collaboration.md b/docs/src/collaboration/private-calls.md similarity index 91% rename from docs/src/collaboration.md rename to docs/src/collaboration/private-calls.md index 8992c7d6ca0185e08ac3923359b0dee9a2fadbfe..8ea4790688f055074c5afcf4eb1d6d63ee49d868 100644 --- a/docs/src/collaboration.md +++ b/docs/src/collaboration/private-calls.md @@ -1,8 +1,4 @@ -# Collaboration - -Only collaborate with people that you trust. Since sharing a project gives them access to your local file system, you should not share projects with people you do not trust; they could potentially do some nasty things. 
- -In the future, we will do more to prevent this type of access beyond the shared project and add more control over what collaborators can do, but for now, only collaborate with people you trust. +# Private Calls ## Adding a collaborator to a call @@ -28,7 +24,7 @@ If someone you want to collaborate with has not yet signed up for Zed, they will ### Voice chat -When joining a call, Zed will automatically share your microphone with other users in the call, if your OS allows it. This isn't tied to your project. You can disable this for your client via the [`mute_on_join`](./configuring-zed.md#calls) setting. +When joining a call, Zed will automatically share your microphone with other users in the call, if your OS allows it. This isn't tied to your project. You can disable this for your client via the [`mute_on_join`](../configuring-zed.md#calls) setting. ## Collaborating on a project diff --git a/docs/src/toolchains.md b/docs/src/toolchains.md index 68e7baa8cf225d85862eadb0ef02674f84b59fd2..f9f5f3fe0e8164b0580786795df0b286a2a7760a 100644 --- a/docs/src/toolchains.md +++ b/docs/src/toolchains.md @@ -8,7 +8,7 @@ With toolchain selector, you don't need to spend time configuring your language You can even select different toolchains for different subprojects within your Zed project. A definition of a subproject is language-specific. In collaborative scenarios, only the project owner can see and modify an active toolchain. -In [remote projects](./remote-development.md), you can use the toolchain selector to control the active toolchain on the SSH host. When [sharing your project](./collaboration.md), the toolchain selector is not available to guests. +In [remote projects](./remote-development.md), you can use the toolchain selector to control the active toolchain on the SSH host. When [sharing your project](./collaboration/overview.md), the toolchain selector is not available to guests. ## Why do we need toolchains? 
From ab80ef18458417ac010f5b6de9c51be3bca9fac3 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Tue, 25 Nov 2025 17:03:21 +0100 Subject: [PATCH 023/749] mcp: Fix `source` property showing up as undefined in settings (#43417) Follow up to #39021. image - Add migration to remove `source` tag because `ContextServerSettings` is now untagged - Fix typos in context server modal - PR seems to have removed the `test_action_namespaces` test, which I brought back in this PR Release Notes: - Fixed an issue where the `source` property of MCP settings would show up as unrecognised --- crates/agent/src/tests/mod.rs | 2 +- .../configure_context_server_modal.rs | 11 +- .../src/wasm_host/wit/since_v0_6_0.rs | 2 +- crates/migrator/src/migrations.rs | 6 + .../src/migrations/m_2025_11_25/settings.rs | 17 +++ crates/migrator/src/migrator.rs | 55 +++++++- crates/project/src/context_server_store.rs | 14 +- crates/project/src/project_settings.rs | 38 +++--- .../settings/src/settings_content/project.rs | 4 +- crates/settings/src/vscode_import.rs | 2 +- crates/zed/src/zed.rs | 127 ++++++++++++++++++ docs/src/ai/mcp.md | 4 +- 12 files changed, 237 insertions(+), 45 deletions(-) create mode 100644 crates/migrator/src/migrations/m_2025_11_25/settings.rs diff --git a/crates/agent/src/tests/mod.rs b/crates/agent/src/tests/mod.rs index efba471f1a927446aa96b1c1426c60b42b725b89..b33080671980eb28c7900aea4bb0942d152a054a 100644 --- a/crates/agent/src/tests/mod.rs +++ b/crates/agent/src/tests/mod.rs @@ -2553,7 +2553,7 @@ fn setup_context_server( let mut settings = ProjectSettings::get_global(cx).clone(); settings.context_servers.insert( name.into(), - project::project_settings::ContextServerSettings::Custom { + project::project_settings::ContextServerSettings::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs 
b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs index ebea8c25fb68a8a5055d4ccaa8b9068583c4b91c..a93df3839d98d95e2f91833078dbe96bc3fb8889 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs @@ -182,7 +182,7 @@ impl ConfigurationSource { parse_input(&editor.read(cx).text(cx)).map(|(id, command)| { ( id, - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: true, command, }, @@ -403,7 +403,7 @@ impl ConfigureContextServerModal { window.spawn(cx, async move |cx| { let target = match settings { - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: _, command, } => Some(ConfigurationTarget::Existing { @@ -635,7 +635,6 @@ impl ConfigureContextServerModal { } fn render_modal_content(&self, cx: &App) -> AnyElement { - // All variants now use single editor approach let editor = match &self.source { ConfigurationSource::New { editor, .. } => editor, ConfigurationSource::Existing { editor, .. } => editor, @@ -712,12 +711,12 @@ impl ConfigureContextServerModal { ) } else if let ConfigurationSource::New { is_http, .. } = &self.source { let label = if *is_http { - "Run command" + "Configure Local" } else { - "Connect via HTTP" + "Configure Remote" }; let tooltip = if *is_http { - "Configure an MCP serevr that runs on stdin/stdout." + "Configure an MCP server that runs on stdin/stdout." 
} else { "Configure an MCP server that you connect to over HTTP" }; diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs index dd0548d9d182e4b81e8490476eef2420f0e6c13d..c96e5216c4703df2a73e1a0bc27c90d13adbb782 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs @@ -972,7 +972,7 @@ impl ExtensionImports for WasmState { }); match settings { - project::project_settings::ContextServerSettings::Custom { + project::project_settings::ContextServerSettings::Stdio { enabled: _, command, } => Ok(serde_json::to_string(&settings::ContextServerSettings { diff --git a/crates/migrator/src/migrations.rs b/crates/migrator/src/migrations.rs index 8a481c734f9efcce4f6342789df6ff1d7fc01562..07b7d3f0afb141d4dde77b883ca97f4df67cdd6c 100644 --- a/crates/migrator/src/migrations.rs +++ b/crates/migrator/src/migrations.rs @@ -147,3 +147,9 @@ pub(crate) mod m_2025_11_20 { pub(crate) use settings::SETTINGS_PATTERNS; } + +pub(crate) mod m_2025_11_25 { + mod settings; + + pub(crate) use settings::remove_context_server_source; +} diff --git a/crates/migrator/src/migrations/m_2025_11_25/settings.rs b/crates/migrator/src/migrations/m_2025_11_25/settings.rs new file mode 100644 index 0000000000000000000000000000000000000000..944eee8a119714b7d9839e2ddf13ec61db4c18d2 --- /dev/null +++ b/crates/migrator/src/migrations/m_2025_11_25/settings.rs @@ -0,0 +1,17 @@ +use anyhow::Result; +use serde_json::Value; + +pub fn remove_context_server_source(settings: &mut Value) -> Result<()> { + if let Some(obj) = settings.as_object_mut() { + if let Some(context_servers) = obj.get_mut("context_servers") { + if let Some(servers) = context_servers.as_object_mut() { + for (_, server) in servers.iter_mut() { + if let Some(server_obj) = server.as_object_mut() { + server_obj.remove("source"); + } + } + } + } + } + Ok(()) +} diff --git a/crates/migrator/src/migrator.rs 
b/crates/migrator/src/migrator.rs index fd30bf24982d2625e4f40669aa2e0142b8634186..444ebadfb615628e91422ed62c351722d8cb9300 100644 --- a/crates/migrator/src/migrator.rs +++ b/crates/migrator/src/migrator.rs @@ -223,6 +223,7 @@ pub fn migrate_settings(text: &str) -> Result> { migrations::m_2025_11_20::SETTINGS_PATTERNS, &SETTINGS_QUERY_2025_11_20, ), + MigrationType::Json(migrations::m_2025_11_25::remove_context_server_source), ]; run_migrations(text, migrations) } @@ -1334,7 +1335,6 @@ mod tests { r#"{ "context_servers": { "some-mcp-server": { - "source": "custom", "command": { "path": "npx", "args": [ @@ -1354,7 +1354,6 @@ mod tests { r#"{ "context_servers": { "some-mcp-server": { - "source": "custom", "command": "npx", "args": [ "-y", @@ -1376,7 +1375,6 @@ mod tests { r#"{ "context_servers": { "server-with-extras": { - "source": "custom", "command": { "path": "/usr/bin/node", "args": ["server.js"] @@ -1389,7 +1387,6 @@ mod tests { r#"{ "context_servers": { "server-with-extras": { - "source": "custom", "command": "/usr/bin/node", "args": ["server.js"], "settings": {} @@ -1404,7 +1401,6 @@ mod tests { r#"{ "context_servers": { "simple-server": { - "source": "custom", "command": { "path": "simple-mcp-server" } @@ -1415,7 +1411,6 @@ mod tests { r#"{ "context_servers": { "simple-server": { - "source": "custom", "command": "simple-mcp-server" } } @@ -2311,4 +2306,52 @@ mod tests { ), ); } + + #[test] + fn test_remove_context_server_source() { + assert_migrate_settings( + &r#" + { + "context_servers": { + "extension_server": { + "source": "extension", + "settings": { + "foo": "bar" + } + }, + "custom_server": { + "source": "custom", + "command": "foo", + "args": ["bar"], + "env": { + "FOO": "BAR" + } + }, + } + } + "# + .unindent(), + Some( + &r#" + { + "context_servers": { + "extension_server": { + "settings": { + "foo": "bar" + } + }, + "custom_server": { + "command": "foo", + "args": ["bar"], + "env": { + "FOO": "BAR" + } + }, + } + } + "# + .unindent(), + ), + ); + } 
} diff --git a/crates/project/src/context_server_store.rs b/crates/project/src/context_server_store.rs index efc2bbf686a273fe18ca3a34f071176d07532981..342a59ab7d5530e8f2268f1c4b72ea44f302f807 100644 --- a/crates/project/src/context_server_store.rs +++ b/crates/project/src/context_server_store.rs @@ -122,7 +122,7 @@ impl ContextServerConfiguration { cx: &AsyncApp, ) -> Option { match settings { - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: _, command, } => Some(ContextServerConfiguration::Custom { command }), @@ -1003,7 +1003,7 @@ mod tests { ), ( server_2_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1044,7 +1044,7 @@ mod tests { ), ( server_2_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1127,7 +1127,7 @@ mod tests { json!({"code.rs": ""}), vec![( SERVER_1_ID.into(), - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1180,7 +1180,7 @@ mod tests { set_context_server_configuration( vec![( server_1_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: false, command: ContextServerCommand { path: "somebinary".into(), @@ -1209,7 +1209,7 @@ mod tests { set_context_server_configuration( vec![( server_1_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1328,7 +1328,7 @@ mod tests { } fn dummy_server_settings() -> ContextServerSettings { - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: true, command: ContextServerCommand { 
path: "somebinary".into(), diff --git a/crates/project/src/project_settings.rs b/crates/project/src/project_settings.rs index 05d5612f7db5b35e3c2fe6513cc45a05ddaac68c..b7dadc52f74f4800741f5cf537ac9f52c09643e3 100644 --- a/crates/project/src/project_settings.rs +++ b/crates/project/src/project_settings.rs @@ -117,7 +117,7 @@ pub struct GlobalLspSettings { #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, JsonSchema, Debug)] #[serde(tag = "source", rename_all = "snake_case")] pub enum ContextServerSettings { - Custom { + Stdio { /// Whether the context server is enabled. #[serde(default = "default_true")] enabled: bool, @@ -125,16 +125,6 @@ pub enum ContextServerSettings { #[serde(flatten)] command: ContextServerCommand, }, - Extension { - /// Whether the context server is enabled. - #[serde(default = "default_true")] - enabled: bool, - /// The settings for this context server specified by the extension. - /// - /// Consult the documentation for the context server to see what settings - /// are supported. - settings: serde_json::Value, - }, Http { /// Whether the context server is enabled. #[serde(default = "default_true")] @@ -145,13 +135,23 @@ pub enum ContextServerSettings { #[serde(skip_serializing_if = "HashMap::is_empty", default)] headers: HashMap, }, + Extension { + /// Whether the context server is enabled. + #[serde(default = "default_true")] + enabled: bool, + /// The settings for this context server specified by the extension. + /// + /// Consult the documentation for the context server to see what settings + /// are supported. 
+ settings: serde_json::Value, + }, } impl From for ContextServerSettings { fn from(value: settings::ContextServerSettingsContent) -> Self { match value { - settings::ContextServerSettingsContent::Custom { enabled, command } => { - ContextServerSettings::Custom { enabled, command } + settings::ContextServerSettingsContent::Stdio { enabled, command } => { + ContextServerSettings::Stdio { enabled, command } } settings::ContextServerSettingsContent::Extension { enabled, settings } => { ContextServerSettings::Extension { enabled, settings } @@ -171,8 +171,8 @@ impl From for ContextServerSettings { impl Into for ContextServerSettings { fn into(self) -> settings::ContextServerSettingsContent { match self { - ContextServerSettings::Custom { enabled, command } => { - settings::ContextServerSettingsContent::Custom { enabled, command } + ContextServerSettings::Stdio { enabled, command } => { + settings::ContextServerSettingsContent::Stdio { enabled, command } } ContextServerSettings::Extension { enabled, settings } => { settings::ContextServerSettingsContent::Extension { enabled, settings } @@ -200,17 +200,17 @@ impl ContextServerSettings { pub fn enabled(&self) -> bool { match self { - ContextServerSettings::Custom { enabled, .. } => *enabled, - ContextServerSettings::Extension { enabled, .. } => *enabled, + ContextServerSettings::Stdio { enabled, .. } => *enabled, ContextServerSettings::Http { enabled, .. } => *enabled, + ContextServerSettings::Extension { enabled, .. } => *enabled, } } pub fn set_enabled(&mut self, enabled: bool) { match self { - ContextServerSettings::Custom { enabled: e, .. } => *e = enabled, - ContextServerSettings::Extension { enabled: e, .. } => *e = enabled, + ContextServerSettings::Stdio { enabled: e, .. } => *e = enabled, ContextServerSettings::Http { enabled: e, .. } => *e = enabled, + ContextServerSettings::Extension { enabled: e, .. 
} => *e = enabled, } } } diff --git a/crates/settings/src/settings_content/project.rs b/crates/settings/src/settings_content/project.rs index ccad50ce8827f6d7d59a45b0fb2efd4abb5257b7..0076721228b3b8c6b8d5e6bfd85fc1d25f00c5e3 100644 --- a/crates/settings/src/settings_content/project.rs +++ b/crates/settings/src/settings_content/project.rs @@ -192,7 +192,7 @@ pub struct SessionSettingsContent { #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, JsonSchema, MergeFrom, Debug)] #[serde(untagged, rename_all = "snake_case")] pub enum ContextServerSettingsContent { - Custom { + Stdio { /// Whether the context server is enabled. #[serde(default = "default_true")] enabled: bool, @@ -225,7 +225,7 @@ pub enum ContextServerSettingsContent { impl ContextServerSettingsContent { pub fn set_enabled(&mut self, enabled: bool) { match self { - ContextServerSettingsContent::Custom { + ContextServerSettingsContent::Stdio { enabled: custom_enabled, .. } => { diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 22081727d8ff767b861a776f0a821e3b4a8d5fdf..7ba07395964266e303965733bdccda42ba7df60e 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -568,7 +568,7 @@ impl VsCodeSettings { .filter_map(|(k, v)| { Some(( k.clone().into(), - ContextServerSettingsContent::Custom { + ContextServerSettingsContent::Stdio { enabled: true, command: serde_json::from_value::(v.clone()) .ok() diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index f6348a8cf22bda6441bca6d31abe8823c1d2215a..180f53a46b93eaa93ea355ece256807a16d03f43 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -4686,6 +4686,133 @@ mod tests { }); } + /// Checks that action namespaces are the expected set. The purpose of this is to prevent typos + /// and let you know when introducing a new namespace. 
+ #[gpui::test] + async fn test_action_namespaces(cx: &mut gpui::TestAppContext) { + use itertools::Itertools; + + init_keymap_test(cx); + cx.update(|cx| { + let all_actions = cx.all_action_names(); + + let mut actions_without_namespace = Vec::new(); + let all_namespaces = all_actions + .iter() + .filter_map(|action_name| { + let namespace = action_name + .split("::") + .collect::>() + .into_iter() + .rev() + .skip(1) + .rev() + .join("::"); + if namespace.is_empty() { + actions_without_namespace.push(*action_name); + } + if &namespace == "test_only" || &namespace == "stories" { + None + } else { + Some(namespace) + } + }) + .sorted() + .dedup() + .collect::>(); + assert_eq!(actions_without_namespace, Vec::<&str>::new()); + + let expected_namespaces = vec![ + "action", + "activity_indicator", + "agent", + #[cfg(not(target_os = "macos"))] + "app_menu", + "assistant", + "assistant2", + "auto_update", + "bedrock", + "branches", + "buffer_search", + "channel_modal", + "cli", + "client", + "collab", + "collab_panel", + "command_palette", + "console", + "context_server", + "copilot", + "debug_panel", + "debugger", + "dev", + "diagnostics", + "edit_prediction", + "editor", + "feedback", + "file_finder", + "git", + "git_onboarding", + "git_panel", + "go_to_line", + "icon_theme_selector", + "journal", + "keymap_editor", + "keystroke_input", + "language_selector", + "line_ending_selector", + "lsp_tool", + "markdown", + "menu", + "notebook", + "notification_panel", + "onboarding", + "outline", + "outline_panel", + "pane", + "panel", + "picker", + "project_panel", + "project_search", + "project_symbols", + "projects", + "repl", + "rules_library", + "search", + "settings_editor", + "settings_profile_selector", + "snippets", + "stash_picker", + "supermaven", + "svg", + "syntax_tree_view", + "tab_switcher", + "task", + "terminal", + "terminal_panel", + "theme_selector", + "toast", + "toolchain", + "variable_list", + "vim", + "window", + "workspace", + "zed", + "zed_actions", + 
"zed_predict_onboarding", + "zeta", + ]; + assert_eq!( + all_namespaces, + expected_namespaces + .into_iter() + .map(|namespace| namespace.to_string()) + .sorted() + .collect::>() + ); + }); + } + #[gpui::test] fn test_bundled_settings_and_themes(cx: &mut App) { cx.text_system() diff --git a/docs/src/ai/mcp.md b/docs/src/ai/mcp.md index d8d2de2a014459ddeed0f2a0fe92c2cbe84045e4..956477a1c2872d9371f770c3a767e5a77bead9fa 100644 --- a/docs/src/ai/mcp.md +++ b/docs/src/ai/mcp.md @@ -40,12 +40,12 @@ You can connect them by adding their commands directly to your `settings.json`, ```json [settings] { "context_servers": { - "run-command": { + "local-mcp-server": { "command": "some-command", "args": ["arg-1", "arg-2"], "env": {} }, - "over-http": { + "remote-mcp-server": { "url": "custom", "headers": { "Authorization": "Bearer " } } From fafe1afa61b6e1c6a02644a3ff889cd7c0aecda5 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 25 Nov 2025 17:13:16 +0100 Subject: [PATCH 024/749] multi_buffer: Remove redundant buffer id field (#43459) It is easy for us to get the two fields out of sync causing weird problems, there is no reason to have both here so. Release Notes: - N/A *or* Added/Fixed/Improved ... 
Co-authored by: Antonio Scandurra --- crates/acp_thread/src/acp_thread.rs | 6 +- crates/acp_thread/src/diff.rs | 18 ++- crates/action_log/src/action_log.rs | 30 +++-- crates/agent/src/edit_agent.rs | 33 +++-- crates/agent/src/tools/read_file_tool.rs | 4 +- crates/agent_ui/src/acp/thread_view.rs | 15 ++- crates/agent_ui/src/agent_diff.rs | 2 +- crates/agent_ui/src/inline_assistant.rs | 1 - .../assistant_text_thread/src/text_thread.rs | 15 +-- crates/buffer_diff/src/buffer_diff.rs | 70 +++++++--- crates/collab/src/tests/editor_tests.rs | 10 +- crates/diagnostics/src/diagnostic_renderer.rs | 2 +- crates/diagnostics/src/diagnostics.rs | 2 +- crates/editor/src/display_map/block_map.rs | 2 +- crates/editor/src/editor.rs | 35 ++--- crates/editor/src/editor_tests.rs | 9 +- crates/editor/src/element.rs | 2 +- crates/editor/src/inlays/inlay_hints.rs | 19 ++- crates/editor/src/items.rs | 49 +++---- crates/editor/src/linked_editing_ranges.rs | 4 +- crates/editor/src/lsp_ext.rs | 2 +- crates/editor/src/rust_analyzer_ext.rs | 18 ++- crates/editor/src/test/editor_test_context.rs | 12 +- crates/git_ui/src/commit_view.rs | 6 +- crates/git_ui/src/project_diff.rs | 14 +- crates/language/src/buffer_tests.rs | 2 +- crates/language/src/syntax_map.rs | 6 +- crates/multi_buffer/src/anchor.rs | 56 ++++---- crates/multi_buffer/src/multi_buffer.rs | 123 ++++++++---------- crates/multi_buffer/src/multi_buffer_tests.rs | 7 +- crates/multi_buffer/src/path_key.rs | 7 +- crates/outline_panel/src/outline_panel.rs | 3 +- crates/project/src/lsp_store.rs | 2 +- crates/project/src/project.rs | 3 +- crates/sum_tree/src/tree_map.rs | 6 + crates/text/src/anchor.rs | 52 +++++++- crates/text/src/text.rs | 25 ++-- crates/vim/src/motion.rs | 10 +- crates/vim/src/state.rs | 2 +- 39 files changed, 378 insertions(+), 306 deletions(-) diff --git a/crates/acp_thread/src/acp_thread.rs b/crates/acp_thread/src/acp_thread.rs index 56e33fda47f095eef1873f7a0724b021e88a0bdc..a42eaa491f7f98e9965cd3aba801690ed996a39a 
100644 --- a/crates/acp_thread/src/acp_thread.rs +++ b/crates/acp_thread/src/acp_thread.rs @@ -347,13 +347,13 @@ impl ToolCall { let buffer = buffer.await.log_err()?; let position = buffer .update(cx, |buffer, _| { + let snapshot = buffer.snapshot(); if let Some(row) = location.line { - let snapshot = buffer.snapshot(); let column = snapshot.indent_size_for_line(row).len; let point = snapshot.clip_point(Point::new(row, column), Bias::Left); snapshot.anchor_before(point) } else { - Anchor::MIN + Anchor::min_for_buffer(snapshot.remote_id()) } }) .ok()?; @@ -2120,7 +2120,7 @@ impl AcpThread { position: edits .last() .map(|(range, _)| range.end) - .unwrap_or(Anchor::MIN), + .unwrap_or(Anchor::min_for_buffer(buffer.read(cx).remote_id())), }), cx, ); diff --git a/crates/acp_thread/src/diff.rs b/crates/acp_thread/src/diff.rs index 055b2f7fb86ffe9d7f12459b6b16405ce77815a0..f17e9d0fce404483ae99efc95bf666586c1f644b 100644 --- a/crates/acp_thread/src/diff.rs +++ b/crates/acp_thread/src/diff.rs @@ -50,9 +50,14 @@ impl Diff { let hunk_ranges = { let buffer = buffer.read(cx); let diff = diff.read(cx); - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, buffer, cx) - .map(|diff_hunk| diff_hunk.buffer_range.to_point(buffer)) - .collect::>() + diff.hunks_intersecting_range( + Anchor::min_for_buffer(buffer.remote_id()) + ..Anchor::max_for_buffer(buffer.remote_id()), + buffer, + cx, + ) + .map(|diff_hunk| diff_hunk.buffer_range.to_point(buffer)) + .collect::>() }; multibuffer.set_excerpts_for_path( @@ -316,7 +321,12 @@ impl PendingDiff { let buffer = self.new_buffer.read(cx); let diff = self.diff.read(cx); let mut ranges = diff - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, buffer, cx) + .hunks_intersecting_range( + Anchor::min_for_buffer(buffer.remote_id()) + ..Anchor::max_for_buffer(buffer.remote_id()), + buffer, + cx, + ) .map(|diff_hunk| diff_hunk.buffer_range.to_point(buffer)) .collect::>(); ranges.extend( diff --git a/crates/action_log/src/action_log.rs 
b/crates/action_log/src/action_log.rs index 78265007a5abe3e724166610013ade776d82dbeb..80c9438bc9f8051cb58357e56a82b5307fd20b75 100644 --- a/crates/action_log/src/action_log.rs +++ b/crates/action_log/src/action_log.rs @@ -409,9 +409,11 @@ impl ActionLog { let new_diff_base = new_diff_base.clone(); async move { let mut unreviewed_edits = Patch::default(); - for hunk in diff_snapshot - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer_snapshot) - { + for hunk in diff_snapshot.hunks_intersecting_range( + Anchor::min_for_buffer(buffer_snapshot.remote_id()) + ..Anchor::max_for_buffer(buffer_snapshot.remote_id()), + &buffer_snapshot, + ) { let old_range = new_diff_base .offset_to_point(hunk.diff_base_byte_range.start) ..new_diff_base.offset_to_point(hunk.diff_base_byte_range.end); @@ -732,12 +734,10 @@ impl ActionLog { cx: &mut Context, ) -> Task<()> { let futures = self.changed_buffers(cx).into_keys().map(|buffer| { - let reject = self.reject_edits_in_ranges( - buffer, - vec![Anchor::MIN..Anchor::MAX], - telemetry.clone(), - cx, - ); + let buffer_ranges = vec![Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + )]; + let reject = self.reject_edits_in_ranges(buffer, buffer_ranges, telemetry.clone(), cx); async move { reject.await.log_err(); @@ -2010,7 +2010,8 @@ mod tests { // User accepts the single hunk action_log.update(cx, |log, cx| { - log.keep_edits_in_range(buffer.clone(), Anchor::MIN..Anchor::MAX, None, cx) + let buffer_range = Anchor::min_max_range_for_buffer(buffer.read(cx).remote_id()); + log.keep_edits_in_range(buffer.clone(), buffer_range, None, cx) }); cx.run_until_parked(); assert_eq!(unreviewed_hunks(&action_log, cx), vec![]); @@ -2031,7 +2032,14 @@ mod tests { // User rejects the hunk action_log .update(cx, |log, cx| { - log.reject_edits_in_ranges(buffer.clone(), vec![Anchor::MIN..Anchor::MAX], None, cx) + log.reject_edits_in_ranges( + buffer.clone(), + vec![Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + )], + 
None, + cx, + ) }) .await .unwrap(); diff --git a/crates/agent/src/edit_agent.rs b/crates/agent/src/edit_agent.rs index e5b1d1e3871ecb0070f60f5f382196482e24963a..5ea04729a49afae944c5e7ca88ad67791e18b6f3 100644 --- a/crates/agent/src/edit_agent.rs +++ b/crates/agent/src/edit_agent.rs @@ -172,14 +172,14 @@ impl EditAgent { project.set_agent_location( Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX, + position: language::Anchor::max_for_buffer(buffer.read(cx).remote_id()), }), cx, ) }); output_events_tx .unbounded_send(EditAgentOutputEvent::Edited( - language::Anchor::MIN..language::Anchor::MAX, + Anchor::min_max_range_for_buffer(buffer.read(cx).remote_id()), )) .ok(); })?; @@ -187,7 +187,7 @@ impl EditAgent { while let Some(event) = parse_rx.next().await { match event? { CreateFileParserEvent::NewTextChunk { chunk } => { - cx.update(|cx| { + let buffer_id = cx.update(|cx| { buffer.update(cx, |buffer, cx| buffer.append(chunk, cx)); self.action_log .update(cx, |log, cx| log.buffer_edited(buffer.clone(), cx)); @@ -195,15 +195,18 @@ impl EditAgent { project.set_agent_location( Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX, + position: language::Anchor::max_for_buffer( + buffer.read(cx).remote_id(), + ), }), cx, ) }); + buffer.read(cx).remote_id() })?; output_events_tx .unbounded_send(EditAgentOutputEvent::Edited( - language::Anchor::MIN..language::Anchor::MAX, + Anchor::min_max_range_for_buffer(buffer_id), )) .ok(); } @@ -1200,7 +1203,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1218,7 +1223,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: 
language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1236,7 +1243,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1254,7 +1263,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1269,7 +1280,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); } diff --git a/crates/agent/src/tools/read_file_tool.rs b/crates/agent/src/tools/read_file_tool.rs index eccb40737c744d57792655cadb925e18a68d2835..77852c5fda674c55b324af8bae90d7d6a57bcff0 100644 --- a/crates/agent/src/tools/read_file_tool.rs +++ b/crates/agent/src/tools/read_file_tool.rs @@ -275,7 +275,9 @@ impl AgentTool for ReadFileTool { project.set_agent_location( Some(AgentLocation { buffer: buffer.downgrade(), - position: anchor.unwrap_or(text::Anchor::MIN), + position: anchor.unwrap_or_else(|| { + text::Anchor::min_for_buffer(buffer.read(cx).remote_id()) + }), }), cx, ); diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index fd0b1eedbdf80d1893760e6182cd2e57d96ef010..1c9e3f83e383658051f7799a7e3096f532addbe1 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -4103,7 +4103,9 @@ impl AcpThreadView { action_log .reject_edits_in_ranges( buffer.clone(), - vec![Anchor::MIN..Anchor::MAX], + vec![Anchor::min_max_range_for_buffer( + 
buffer.read(cx).remote_id(), + )], Some(telemetry.clone()), cx, ) @@ -4124,7 +4126,9 @@ impl AcpThreadView { action_log.update(cx, |action_log, cx| { action_log.keep_edits_in_range( buffer.clone(), - Anchor::MIN..Anchor::MAX, + Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + ), Some(telemetry.clone()), cx, ); @@ -4743,11 +4747,8 @@ impl AcpThreadView { let buffer = multibuffer.as_singleton(); if agent_location.buffer.upgrade() == buffer { let excerpt_id = multibuffer.excerpt_ids().first().cloned(); - let anchor = editor::Anchor::in_buffer( - excerpt_id.unwrap(), - buffer.unwrap().read(cx).remote_id(), - agent_location.position, - ); + let anchor = + editor::Anchor::in_buffer(excerpt_id.unwrap(), agent_location.position); editor.change_selections(Default::default(), window, cx, |selections| { selections.select_anchor_ranges([anchor..anchor]); }) diff --git a/crates/agent_ui/src/agent_diff.rs b/crates/agent_ui/src/agent_diff.rs index 53e7a2f46d37e4cd2f0688d5af2a7d4a01174801..8aece1984ad597e629cd966c0e61d6a5681d7020 100644 --- a/crates/agent_ui/src/agent_diff.rs +++ b/crates/agent_ui/src/agent_diff.rs @@ -145,7 +145,7 @@ impl AgentDiffPane { let diff_hunk_ranges = diff .hunks_intersecting_range( - language::Anchor::MIN..language::Anchor::MAX, + language::Anchor::min_max_range_for_buffer(snapshot.remote_id()), &snapshot, cx, ) diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index 81242135757561a6c829cc9cabf8893294d9e875..0f617044546f186bddb2be5de3983edf9dad2e0c 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -440,7 +440,6 @@ impl InlineAssistant { { let anchor_range = Anchor::range_in_buffer( excerpt_id, - buffer.remote_id(), buffer.anchor_before(buffer_range.start)..buffer.anchor_after(buffer_range.end), ); diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index 
2bc4ceec4c243a654abf04b19b4e2ba93a1fef4f..613c9b862e8a0b055465a73fe34c541ecb18d4a1 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -797,7 +797,7 @@ impl TextThread { }); let message = MessageAnchor { id: first_message_id, - start: language::Anchor::MIN, + start: language::Anchor::min_for_buffer(this.buffer.read(cx).remote_id()), }; this.messages_metadata.insert( first_message_id, @@ -1147,12 +1147,10 @@ impl TextThread { cx: &App, ) -> bool { let version = &self.buffer.read(cx).version; - let observed_start = range.start == language::Anchor::MIN - || range.start == language::Anchor::MAX - || version.observed(range.start.timestamp); - let observed_end = range.end == language::Anchor::MIN - || range.end == language::Anchor::MAX - || version.observed(range.end.timestamp); + let observed_start = + range.start.is_min() || range.start.is_max() || version.observed(range.start.timestamp); + let observed_end = + range.end.is_min() || range.end.is_max() || version.observed(range.end.timestamp); observed_start && observed_end } @@ -2858,7 +2856,8 @@ impl TextThread { messages.next(); } } - let message_end_anchor = message_end.unwrap_or(language::Anchor::MAX); + let message_end_anchor = + message_end.unwrap_or(language::Anchor::max_for_buffer(buffer.remote_id())); let message_end = message_end_anchor.to_offset(buffer); return Some(Message { diff --git a/crates/buffer_diff/src/buffer_diff.rs b/crates/buffer_diff/src/buffer_diff.rs index 52c6463b9bcccd242ef18e5f3dcb518bd335686d..38b9b8e4baa1d0789bf64ae53f28bc28bfe6bd98 100644 --- a/crates/buffer_diff/src/buffer_diff.rs +++ b/crates/buffer_diff/src/buffer_diff.rs @@ -153,6 +153,10 @@ impl std::fmt::Debug for BufferDiffInner { } impl BufferDiffSnapshot { + pub fn buffer_diff_id(&self) -> BufferId { + self.inner.base_text.remote_id() + } + fn empty(buffer: &text::BufferSnapshot, cx: &mut App) -> BufferDiffSnapshot { BufferDiffSnapshot { inner: BufferDiffInner { 
@@ -340,7 +344,7 @@ impl BufferDiffInner { }; let hunk = PendingHunk { - buffer_range: Anchor::MIN..Anchor::MAX, + buffer_range: Anchor::min_max_range_for_buffer(buffer.remote_id()), diff_base_byte_range: 0..index_text.map_or(0, |rope| rope.len()), buffer_version: buffer.version().clone(), new_status, @@ -780,7 +784,7 @@ fn compute_hunks( } else { tree.push( InternalDiffHunk { - buffer_range: Anchor::MIN..Anchor::MAX, + buffer_range: Anchor::min_max_range_for_buffer(buffer.remote_id()), diff_base_byte_range: 0..0, }, &buffer, @@ -941,10 +945,10 @@ impl BufferDiff { pub fn clear_pending_hunks(&mut self, cx: &mut Context) { if self.secondary_diff.is_some() { self.inner.pending_hunks = SumTree::from_summary(DiffHunkSummary { - buffer_range: Anchor::MIN..Anchor::MIN, + buffer_range: Anchor::min_min_range_for_buffer(self.buffer_id), }); cx.emit(BufferDiffEvent::DiffChanged { - changed_range: Some(Anchor::MIN..Anchor::MAX), + changed_range: Some(Anchor::min_max_range_for_buffer(self.buffer_id)), }); } } @@ -1065,7 +1069,10 @@ impl BufferDiff { { (false, new_state.compare(state, buffer)) } - _ => (true, Some(text::Anchor::MIN..text::Anchor::MAX)), + _ => ( + true, + Some(text::Anchor::min_max_range_for_buffer(self.buffer_id)), + ), }; if let Some(secondary_changed_range) = secondary_diff_change @@ -1126,7 +1133,11 @@ impl BufferDiff { buffer_snapshot: &'a text::BufferSnapshot, cx: &'a App, ) -> impl 'a + Iterator { - self.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, buffer_snapshot, cx) + self.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer_snapshot.remote_id()), + buffer_snapshot, + cx, + ) } pub fn hunks_intersecting_range<'a>( @@ -1222,7 +1233,9 @@ impl BufferDiff { impl DiffHunk { pub fn is_created_file(&self) -> bool { - self.diff_base_byte_range == (0..0) && self.buffer_range == (Anchor::MIN..Anchor::MAX) + self.diff_base_byte_range == (0..0) + && self.buffer_range.start.is_min() + && self.buffer_range.end.is_min() } pub fn status(&self) -> 
DiffHunkStatus { @@ -1389,7 +1402,10 @@ mod tests { let mut buffer = Buffer::new(ReplicaId::LOCAL, BufferId::new(1).unwrap(), buffer_text); let mut diff = BufferDiffSnapshot::new_sync(buffer.clone(), diff_base.clone(), cx); assert_hunks( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &diff_base, &[(1..2, "two\n", "HELLO\n", DiffHunkStatus::modified_none())], @@ -1398,7 +1414,10 @@ mod tests { buffer.edit([(0..0, "point five\n")]); diff = BufferDiffSnapshot::new_sync(buffer.clone(), diff_base.clone(), cx); assert_hunks( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &diff_base, &[ @@ -1409,7 +1428,10 @@ mod tests { diff = cx.update(|cx| BufferDiffSnapshot::empty(&buffer, cx)); assert_hunks::<&str, _>( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &diff_base, &[], @@ -1483,7 +1505,10 @@ mod tests { ]; assert_hunks( - uncommitted_diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + uncommitted_diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &head_text, &expected_hunks, @@ -1542,8 +1567,11 @@ mod tests { }) .await; assert_eq!( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer) - .count(), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer + ) + .count(), 8 ); @@ -2155,8 +2183,12 @@ mod tests { let mut diff = uncommitted_diff(&working_copy, &index_text, head_text.clone(), cx); let mut hunks = diff.update(cx, |diff, cx| { - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &working_copy, cx) - .collect::>() + 
diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff.buffer_id), + &working_copy, + cx, + ) + .collect::>() }); if hunks.is_empty() { return; @@ -2185,8 +2217,12 @@ mod tests { diff = uncommitted_diff(&working_copy, &index_text, head_text.clone(), cx); let found_hunks = diff.update(cx, |diff, cx| { - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &working_copy, cx) - .collect::>() + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff.buffer_id), + &working_copy, + cx, + ) + .collect::>() }); assert_eq!(hunks.len(), found_hunks.len()); diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index fe20ab935c9fb2ffd2c18962953f9d62ca06fb16..e5d3661aaf1aa0c74a4204e0989018121f5eb64a 100644 --- a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -1581,7 +1581,10 @@ async fn test_share_project( buffer_a.read_with(cx_a, |buffer, _| { buffer .snapshot() - .selections_in_range(text::Anchor::MIN..text::Anchor::MAX, false) + .selections_in_range( + text::Anchor::min_max_range_for_buffer(buffer.remote_id()), + false, + ) .count() == 1 }); @@ -1622,7 +1625,10 @@ async fn test_share_project( buffer_a.read_with(cx_a, |buffer, _| { buffer .snapshot() - .selections_in_range(text::Anchor::MIN..text::Anchor::MAX, false) + .selections_in_range( + text::Anchor::min_max_range_for_buffer(buffer.remote_id()), + false, + ) .count() == 0 }); diff --git a/crates/diagnostics/src/diagnostic_renderer.rs b/crates/diagnostics/src/diagnostic_renderer.rs index 2636b1aadc9708ff6832a5baa212277672dd305f..72ad7b591413832183bb85d58d188e692d46ffad 100644 --- a/crates/diagnostics/src/diagnostic_renderer.rs +++ b/crates/diagnostics/src/diagnostic_renderer.rs @@ -284,7 +284,7 @@ impl DiagnosticBlock { if range.context.overlaps(&diagnostic.range, &snapshot) { Self::jump_to( editor, - Anchor::range_in_buffer(excerpt_id, buffer_id, diagnostic.range), + Anchor::range_in_buffer(excerpt_id, 
diagnostic.range), window, cx, ); diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index b24a63b830b93cdbe14e2329abe524f6523cbbd6..413b73d1b6f679fa464d378760e37c773e1583e7 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -308,7 +308,7 @@ impl ProjectDiagnosticsEditor { .selections .all_anchors(&snapshot) .iter() - .filter_map(|anchor| anchor.start.buffer_id) + .filter_map(|anchor| anchor.start.text_anchor.buffer_id) .collect::>() }); for buffer_id in buffer_ids { diff --git a/crates/editor/src/display_map/block_map.rs b/crates/editor/src/display_map/block_map.rs index b55c5330dd398428c549ae1932c1f0a25c8e1436..58dea4010caaec98cdc14c9dc0e2b02af8ef1712 100644 --- a/crates/editor/src/display_map/block_map.rs +++ b/crates/editor/src/display_map/block_map.rs @@ -2976,7 +2976,7 @@ mod tests { ); } - #[gpui::test(iterations = 100)] + #[gpui::test(iterations = 60)] fn test_random_blocks(cx: &mut gpui::TestAppContext, mut rng: StdRng) { cx.update(init_test); diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 08627f1bd64be6e62581014628c57306df43623e..cd7a872f8c129c3b67b544ed2ba78d7fde104b48 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -1780,7 +1780,7 @@ impl Editor { let start_row = (multi_buffer_visible_start.row).min(max_row); let end_row = (multi_buffer_visible_start.row + 10).min(max_row); - if let Some((excerpt_id, buffer_id, buffer)) = multi_buffer.read(cx).as_singleton() { + if let Some((excerpt_id, _, buffer)) = multi_buffer.read(cx).as_singleton() { let outline_items = buffer .outline_items_containing( Point::new(start_row, 0)..Point::new(end_row, 0), @@ -1790,10 +1790,9 @@ impl Editor { .into_iter() .map(|outline_item| OutlineItem { depth: outline_item.depth, - range: Anchor::range_in_buffer(*excerpt_id, buffer_id, outline_item.range), + range: Anchor::range_in_buffer(*excerpt_id, outline_item.range), 
source_range_for_text: Anchor::range_in_buffer( *excerpt_id, - buffer_id, outline_item.source_range_for_text, ), text: outline_item.text, @@ -1801,10 +1800,10 @@ impl Editor { name_ranges: outline_item.name_ranges, body_range: outline_item .body_range - .map(|range| Anchor::range_in_buffer(*excerpt_id, buffer_id, range)), + .map(|range| Anchor::range_in_buffer(*excerpt_id, range)), annotation_range: outline_item .annotation_range - .map(|range| Anchor::range_in_buffer(*excerpt_id, buffer_id, range)), + .map(|range| Anchor::range_in_buffer(*excerpt_id, range)), }); return Some(outline_items.collect()); } @@ -3259,7 +3258,7 @@ impl Editor { } if local { - if let Some(buffer_id) = new_cursor_position.buffer_id { + if let Some(buffer_id) = new_cursor_position.text_anchor.buffer_id { self.register_buffer(buffer_id, cx); } @@ -4198,8 +4197,8 @@ impl Editor { continue; } if self.selections.disjoint_anchor_ranges().any(|s| { - if s.start.buffer_id != selection.start.buffer_id - || s.end.buffer_id != selection.end.buffer_id + if s.start.text_anchor.buffer_id != selection.start.buffer_id + || s.end.text_anchor.buffer_id != selection.end.buffer_id { return false; } @@ -5484,6 +5483,7 @@ impl Editor { } let buffer_position = multibuffer_snapshot.anchor_before(position); let Some(buffer) = buffer_position + .text_anchor .buffer_id .and_then(|buffer_id| self.buffer.read(cx).buffer(buffer_id)) else { @@ -6923,8 +6923,7 @@ impl Editor { continue; } - let range = - Anchor::range_in_buffer(excerpt_id, buffer_id, *start..*end); + let range = Anchor::range_in_buffer(excerpt_id, *start..*end); if highlight.kind == lsp::DocumentHighlightKind::WRITE { write_ranges.push(range); } else { @@ -7033,11 +7032,8 @@ impl Editor { .anchor_after(search_range.start + match_range.start); let match_end = buffer_snapshot .anchor_before(search_range.start + match_range.end); - let match_anchor_range = Anchor::range_in_buffer( - excerpt_id, - buffer_snapshot.remote_id(), - match_start..match_end, - ); + 
let match_anchor_range = + Anchor::range_in_buffer(excerpt_id, match_start..match_end); (match_anchor_range != query_range).then_some(match_anchor_range) }), ); @@ -8212,8 +8208,7 @@ impl Editor { cx, ); for (breakpoint, state) in breakpoints { - let multi_buffer_anchor = - Anchor::in_buffer(excerpt_id, buffer_snapshot.remote_id(), breakpoint.position); + let multi_buffer_anchor = Anchor::in_buffer(excerpt_id, breakpoint.position); let position = multi_buffer_anchor .to_point(&multi_buffer_snapshot) .to_display_point(&snapshot); @@ -20804,8 +20799,7 @@ impl Editor { let start = highlight.range.start.to_display_point(&snapshot); let end = highlight.range.end.to_display_point(&snapshot); let start_row = start.row().0; - let end_row = if highlight.range.end.text_anchor != text::Anchor::MAX - && end.column() == 0 + let end_row = if !highlight.range.end.text_anchor.is_max() && end.column() == 0 { end.row().0.saturating_sub(1) } else { @@ -21361,7 +21355,7 @@ impl Editor { .for_each(|hint| { let inlay = Inlay::debugger( post_inc(&mut editor.next_inlay_id), - Anchor::in_buffer(excerpt_id, buffer_id, hint.position), + Anchor::in_buffer(excerpt_id, hint.position), hint.text(), ); if !inlay.text().chars().contains(&'\n') { @@ -24105,7 +24099,6 @@ impl EditorSnapshot { display_row_range: hunk_display_start.row()..end_row, multi_buffer_range: Anchor::range_in_buffer( hunk.excerpt_id, - hunk.buffer_id, hunk.buffer_range, ), is_created_file, diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 119c7058e061406b4a75017b6c5c8717f9f250c0..f68b15b6b258a5ab730a13af9d7ecc62763321ea 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -21550,10 +21550,9 @@ async fn test_adjacent_diff_hunks(executor: BackgroundExecutor, cx: &mut TestApp .diff_hunks_in_ranges(&[Anchor::min()..Anchor::max()], &snapshot.buffer_snapshot()) .collect::>(); let excerpt_id = editor.buffer.read(cx).excerpt_ids()[0]; - let buffer_id = 
hunks[0].buffer_id; hunks .into_iter() - .map(|hunk| Anchor::range_in_buffer(excerpt_id, buffer_id, hunk.buffer_range)) + .map(|hunk| Anchor::range_in_buffer(excerpt_id, hunk.buffer_range)) .collect::>() }); assert_eq!(hunk_ranges.len(), 2); @@ -21641,10 +21640,9 @@ async fn test_adjacent_diff_hunks(executor: BackgroundExecutor, cx: &mut TestApp .diff_hunks_in_ranges(&[Anchor::min()..Anchor::max()], &snapshot.buffer_snapshot()) .collect::>(); let excerpt_id = editor.buffer.read(cx).excerpt_ids()[0]; - let buffer_id = hunks[0].buffer_id; hunks .into_iter() - .map(|hunk| Anchor::range_in_buffer(excerpt_id, buffer_id, hunk.buffer_range)) + .map(|hunk| Anchor::range_in_buffer(excerpt_id, hunk.buffer_range)) .collect::>() }); assert_eq!(hunk_ranges.len(), 2); @@ -21707,10 +21705,9 @@ async fn test_toggle_deletion_hunk_at_start_of_file( .diff_hunks_in_ranges(&[Anchor::min()..Anchor::max()], &snapshot.buffer_snapshot()) .collect::>(); let excerpt_id = editor.buffer.read(cx).excerpt_ids()[0]; - let buffer_id = hunks[0].buffer_id; hunks .into_iter() - .map(|hunk| Anchor::range_in_buffer(excerpt_id, buffer_id, hunk.buffer_range)) + .map(|hunk| Anchor::range_in_buffer(excerpt_id, hunk.buffer_range)) .collect::>() }); assert_eq!(hunk_ranges.len(), 1); diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 71c76c0cb3eba0e70da140191ab5eb8daa5735bc..4ea12f0a21295d97cdcff565c484750e14334223 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -9283,7 +9283,7 @@ impl Element for EditorElement { HashMap::default(); for selection in all_anchor_selections.iter() { let head = selection.head(); - if let Some(buffer_id) = head.buffer_id { + if let Some(buffer_id) = head.text_anchor.buffer_id { anchors_by_buffer .entry(buffer_id) .and_modify(|(latest_id, latest_anchor)| { diff --git a/crates/editor/src/inlays/inlay_hints.rs b/crates/editor/src/inlays/inlay_hints.rs index 
cd9456be7a109ce5c2535339bc153bb5434ab94f..4379276707ceacd07935d660d54ac3b52216a720 100644 --- a/crates/editor/src/inlays/inlay_hints.rs +++ b/crates/editor/src/inlays/inlay_hints.rs @@ -584,8 +584,11 @@ impl Editor { }) .max_by_key(|hint| hint.id) { - if let Some(ResolvedHint::Resolved(cached_hint)) = - hovered_hint.position.buffer_id.and_then(|buffer_id| { + if let Some(ResolvedHint::Resolved(cached_hint)) = hovered_hint + .position + .text_anchor + .buffer_id + .and_then(|buffer_id| { lsp_store.update(cx, |lsp_store, cx| { lsp_store.resolved_hint(buffer_id, hovered_hint.id, cx) }) @@ -757,7 +760,7 @@ impl Editor { let visible_inlay_hint_ids = self .visible_inlay_hints(cx) .iter() - .filter(|inlay| inlay.position.buffer_id == Some(buffer_id)) + .filter(|inlay| inlay.position.text_anchor.buffer_id == Some(buffer_id)) .map(|inlay| inlay.id) .collect::>(); let Some(inlay_hints) = &mut self.inlay_hints else { @@ -858,9 +861,13 @@ impl Editor { self.visible_inlay_hints(cx) .iter() .filter(|inlay| { - inlay.position.buffer_id.is_none_or(|buffer_id| { - invalidate_hints_for_buffers.contains(&buffer_id) - }) + inlay + .position + .text_anchor + .buffer_id + .is_none_or(|buffer_id| { + invalidate_hints_for_buffers.contains(&buffer_id) + }) }) .map(|inlay| inlay.id), ); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 157ad84d053b9125dfd59243098deb680be7b264..7e82336b4403cc8142983ef3802a9cdb9ca9cf2b 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -455,21 +455,13 @@ async fn update_editor_from_message( })??; // Deserialize the editor state. 
- let (selections, pending_selection, scroll_top_anchor) = this.update(cx, |editor, cx| { - let buffer = editor.buffer.read(cx).read(cx); - let selections = message - .selections - .into_iter() - .filter_map(|selection| deserialize_selection(&buffer, selection)) - .collect::>(); - let pending_selection = message - .pending_selection - .and_then(|selection| deserialize_selection(&buffer, selection)); - let scroll_top_anchor = message - .scroll_top_anchor - .and_then(|anchor| deserialize_anchor(&buffer, anchor)); - anyhow::Ok((selections, pending_selection, scroll_top_anchor)) - })??; + let selections = message + .selections + .into_iter() + .filter_map(deserialize_selection) + .collect::>(); + let pending_selection = message.pending_selection.and_then(deserialize_selection); + let scroll_top_anchor = message.scroll_top_anchor.and_then(deserialize_anchor); // Wait until the buffer has received all of the operations referenced by // the editor's new state. @@ -563,24 +555,20 @@ fn deserialize_excerpt_range( )) } -fn deserialize_selection( - buffer: &MultiBufferSnapshot, - selection: proto::Selection, -) -> Option> { +fn deserialize_selection(selection: proto::Selection) -> Option> { Some(Selection { id: selection.id as usize, - start: deserialize_anchor(buffer, selection.start?)?, - end: deserialize_anchor(buffer, selection.end?)?, + start: deserialize_anchor(selection.start?)?, + end: deserialize_anchor(selection.end?)?, reversed: selection.reversed, goal: SelectionGoal::None, }) } -fn deserialize_anchor(buffer: &MultiBufferSnapshot, anchor: proto::EditorAnchor) -> Option { +fn deserialize_anchor(anchor: proto::EditorAnchor) -> Option { let excerpt_id = ExcerptId::from_proto(anchor.excerpt_id); Some(Anchor::in_buffer( excerpt_id, - buffer.buffer_id_for_excerpt(excerpt_id)?, language::proto::deserialize_anchor(anchor.anchor?)?, )) } @@ -1374,7 +1362,7 @@ impl ProjectItem for Editor { cx: &mut Context, ) -> Self { let mut editor = Self::for_buffer(buffer.clone(), 
Some(project), window, cx); - if let Some((excerpt_id, buffer_id, snapshot)) = + if let Some((excerpt_id, _, snapshot)) = editor.buffer().read(cx).snapshot(cx).as_singleton() && WorkspaceSettings::get(None, cx).restore_on_file_reopen && let Some(restoration_data) = Self::project_item_kind() @@ -1397,11 +1385,8 @@ impl ProjectItem for Editor { }); } let (top_row, offset) = restoration_data.scroll_position; - let anchor = Anchor::in_buffer( - *excerpt_id, - buffer_id, - snapshot.anchor_before(Point::new(top_row, 0)), - ); + let anchor = + Anchor::in_buffer(*excerpt_id, snapshot.anchor_before(Point::new(top_row, 0))); editor.set_scroll_anchor(ScrollAnchor { anchor, offset }, window, cx); } @@ -1783,11 +1768,7 @@ impl SearchableItem for Editor { .anchor_after(search_range.start + match_range.start); let end = search_buffer .anchor_before(search_range.start + match_range.end); - Anchor::range_in_buffer( - excerpt_id, - search_buffer.remote_id(), - start..end, - ) + Anchor::range_in_buffer(excerpt_id, start..end) } }), ); diff --git a/crates/editor/src/linked_editing_ranges.rs b/crates/editor/src/linked_editing_ranges.rs index 33635a2ae2009031220ab0a58e99f8b07957de94..ff3096961d646a2a98458319d927a4e2723d0602 100644 --- a/crates/editor/src/linked_editing_ranges.rs +++ b/crates/editor/src/linked_editing_ranges.rs @@ -70,8 +70,8 @@ pub(super) fn refresh_linked_ranges( let cursor_position = selection.head(); let start_position = snapshot.anchor_before(cursor_position); let end_position = snapshot.anchor_after(selection.tail()); - if start_position.buffer_id != end_position.buffer_id - || end_position.buffer_id.is_none() + if start_position.text_anchor.buffer_id != end_position.text_anchor.buffer_id + || end_position.text_anchor.buffer_id.is_none() { // Throw away selections spanning multiple buffers. 
continue; diff --git a/crates/editor/src/lsp_ext.rs b/crates/editor/src/lsp_ext.rs index 36353e8d42527cd59043ab3cf2b6105c534412d9..37cc734ab1ef0a0b677b3e405ff70b461d349a1c 100644 --- a/crates/editor/src/lsp_ext.rs +++ b/crates/editor/src/lsp_ext.rs @@ -37,7 +37,7 @@ where .selections .disjoint_anchors_arc() .iter() - .filter_map(|selection| Some((selection.head(), selection.head().buffer_id?))) + .filter_map(|selection| Some((selection.head(), selection.head().text_anchor.buffer_id?))) .unique_by(|(_, buffer_id)| *buffer_id) .find_map(|(trigger_anchor, buffer_id)| { let buffer = editor.buffer().read(cx).buffer(buffer_id)?; diff --git a/crates/editor/src/rust_analyzer_ext.rs b/crates/editor/src/rust_analyzer_ext.rs index ffa0c017c0eb157df776cc49e0dba51e617e3379..f548db75ad5d8cfe32a59a798b6d23931c34f215 100644 --- a/crates/editor/src/rust_analyzer_ext.rs +++ b/crates/editor/src/rust_analyzer_ext.rs @@ -322,7 +322,11 @@ fn cancel_flycheck_action( .disjoint_anchors_arc() .iter() .find_map(|selection| { - let buffer_id = selection.start.buffer_id.or(selection.end.buffer_id)?; + let buffer_id = selection + .start + .text_anchor + .buffer_id + .or(selection.end.text_anchor.buffer_id)?; let project = project.read(cx); let entry_id = project .buffer_for_id(buffer_id, cx)? @@ -347,7 +351,11 @@ fn run_flycheck_action( .disjoint_anchors_arc() .iter() .find_map(|selection| { - let buffer_id = selection.start.buffer_id.or(selection.end.buffer_id)?; + let buffer_id = selection + .start + .text_anchor + .buffer_id + .or(selection.end.text_anchor.buffer_id)?; let project = project.read(cx); let entry_id = project .buffer_for_id(buffer_id, cx)? 
@@ -372,7 +380,11 @@ fn clear_flycheck_action( .disjoint_anchors_arc() .iter() .find_map(|selection| { - let buffer_id = selection.start.buffer_id.or(selection.end.buffer_id)?; + let buffer_id = selection + .start + .text_anchor + .buffer_id + .or(selection.end.text_anchor.buffer_id)?; let project = project.read(cx); let entry_id = project .buffer_for_id(buffer_id, cx)? diff --git a/crates/editor/src/test/editor_test_context.rs b/crates/editor/src/test/editor_test_context.rs index 5793bcf576c7ed0e1604c30aada0fb362f65bb9f..cd45a6ec47ad7631404189194a6a0291a6240647 100644 --- a/crates/editor/src/test/editor_test_context.rs +++ b/crates/editor/src/test/editor_test_context.rs @@ -490,11 +490,7 @@ impl EditorTestContext { ); assert_eq!( multibuffer_snapshot - .text_for_range(Anchor::range_in_buffer( - excerpt_id, - snapshot.remote_id(), - range.context.clone() - )) + .text_for_range(Anchor::range_in_buffer(excerpt_id, range.context.clone())) .collect::(), expected_text, "{}", @@ -675,11 +671,7 @@ impl std::fmt::Display for FormatMultiBufferAsMarkedText { } let mut text = multibuffer_snapshot - .text_for_range(Anchor::range_in_buffer( - *excerpt_id, - snapshot.remote_id(), - range.context.clone(), - )) + .text_for_range(Anchor::range_in_buffer(*excerpt_id, range.context.clone())) .collect::(); let selections = selections diff --git a/crates/git_ui/src/commit_view.rs b/crates/git_ui/src/commit_view.rs index 3251ab43f71292d2d46503ef83f61692f385dc76..41fd99982c97967c016d9a59199f22ea7ba6115c 100644 --- a/crates/git_ui/src/commit_view.rs +++ b/crates/git_ui/src/commit_view.rs @@ -223,7 +223,11 @@ impl CommitView { let snapshot = buffer.read(cx).snapshot(); let diff = buffer_diff.read(cx); let diff_hunk_ranges = diff - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &snapshot, cx) + .hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff.buffer_id), + &snapshot, + cx, + ) .map(|diff_hunk| diff_hunk.buffer_range.to_point(&snapshot)) .collect::>(); let path = 
snapshot.file().unwrap().path().clone(); diff --git a/crates/git_ui/src/project_diff.rs b/crates/git_ui/src/project_diff.rs index 715b74db333e78081a245f2fb362426591db79d9..a6de68b789c33ff75b8e6f474f31b7f9f6d8399c 100644 --- a/crates/git_ui/src/project_diff.rs +++ b/crates/git_ui/src/project_diff.rs @@ -383,12 +383,8 @@ impl ProjectDiff { .collect::>(); if !ranges.iter().any(|range| range.start != range.end) { selection = false; - if let Some((excerpt_id, buffer, range)) = self.editor.read(cx).active_excerpt(cx) { - ranges = vec![multi_buffer::Anchor::range_in_buffer( - excerpt_id, - buffer.read(cx).remote_id(), - range, - )]; + if let Some((excerpt_id, _, range)) = self.editor.read(cx).active_excerpt(cx) { + ranges = vec![multi_buffer::Anchor::range_in_buffer(excerpt_id, range)]; } else { ranges = Vec::default(); } @@ -488,7 +484,11 @@ impl ProjectDiff { let snapshot = buffer.read(cx).snapshot(); let diff_read = diff.read(cx); let diff_hunk_ranges = diff_read - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &snapshot, cx) + .hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff_read.buffer_id), + &snapshot, + cx, + ) .map(|diff_hunk| diff_hunk.buffer_range); let conflicts = conflict_addon .conflict_set(snapshot.remote_id()) diff --git a/crates/language/src/buffer_tests.rs b/crates/language/src/buffer_tests.rs index 05402abcad478e2eedb17d31853ab0bc2bd3702c..efef0a08127bc66f9c6d8f21fe5a545dbee20fb1 100644 --- a/crates/language/src/buffer_tests.rs +++ b/crates/language/src/buffer_tests.rs @@ -3427,7 +3427,7 @@ fn test_random_collaboration(cx: &mut App, mut rng: StdRng) { for buffer in &buffers { let buffer = buffer.read(cx).snapshot(); let actual_remote_selections = buffer - .selections_in_range(Anchor::MIN..Anchor::MAX, false) + .selections_in_range(Anchor::min_max_range_for_buffer(buffer.remote_id()), false) .map(|(replica_id, _, _, selections)| (replica_id, selections.collect::>())) .collect::>(); let expected_remote_selections = active_selections 
diff --git a/crates/language/src/syntax_map.rs b/crates/language/src/syntax_map.rs index 33a652b6fdeb32a2adbc1743cf8a70fe453518f5..8574d52ff900563ddfb733c09204caab5eb6ae44 100644 --- a/crates/language/src/syntax_map.rs +++ b/crates/language/src/syntax_map.rs @@ -330,7 +330,7 @@ impl SyntaxSnapshot { let slice = cursor.slice( &SyntaxLayerPosition { depth: depth + 1, - range: Anchor::MIN..Anchor::MAX, + range: Anchor::min_max_range_for_buffer(text.remote_id()), language: None, }, Bias::Left, @@ -493,7 +493,7 @@ impl SyntaxSnapshot { start_point: Point::zero().to_ts_point(), end_point: text.max_point().to_ts_point(), }], - range: Anchor::MIN..Anchor::MAX, + range: Anchor::min_max_range_for_buffer(text.remote_id()), mode: ParseMode::Single, }); @@ -515,7 +515,7 @@ impl SyntaxSnapshot { } else { SyntaxLayerPosition { depth: max_depth + 1, - range: Anchor::MAX..Anchor::MAX, + range: Anchor::min_max_range_for_buffer(text.remote_id()), language: None, } }; diff --git a/crates/multi_buffer/src/anchor.rs b/crates/multi_buffer/src/anchor.rs index b8c1680574a86354d92f39c544c202642293f619..51696ba09e4bdb1c6be065f63d3ee7ff634e6b1a 100644 --- a/crates/multi_buffer/src/anchor.rs +++ b/crates/multi_buffer/src/anchor.rs @@ -7,12 +7,9 @@ use std::{ ops::{AddAssign, Range, Sub}, }; use sum_tree::Bias; -use text::BufferId; #[derive(Clone, Copy, Eq, PartialEq, Hash)] pub struct Anchor { - /// Invariant: If buffer id is `None`, excerpt id must be `ExcerptId::min()` or `ExcerptId::max()`. 
- pub buffer_id: Option, pub excerpt_id: ExcerptId, pub text_anchor: text::Anchor, pub diff_base_anchor: Option, @@ -20,15 +17,14 @@ pub struct Anchor { impl std::fmt::Debug for Anchor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if *self == Self::min() { - return f.write_str("Anchor::MIN"); + if self.is_min() { + return write!(f, "Anchor::min({:?})", self.text_anchor.buffer_id); } - if *self == Self::max() { - return f.write_str("Anchor::MAX"); + if self.is_max() { + return write!(f, "Anchor::max({:?})", self.text_anchor.buffer_id); } f.debug_struct("Anchor") - .field("buffer_id", &self.buffer_id) .field("excerpt_id", &self.excerpt_id) .field("text_anchor", &self.text_anchor) .field("diff_base_anchor", &self.diff_base_anchor) @@ -44,35 +40,20 @@ impl Anchor { } } - pub fn in_buffer( - excerpt_id: ExcerptId, - buffer_id: BufferId, - text_anchor: text::Anchor, - ) -> Self { - debug_assert!( - text_anchor.buffer_id.is_none_or(|id| id == buffer_id), - "buffer id does not match the one in the text anchor: {buffer_id:?} {text_anchor:?}", - ); + pub fn in_buffer(excerpt_id: ExcerptId, text_anchor: text::Anchor) -> Self { Self { - buffer_id: Some(buffer_id), excerpt_id, text_anchor, diff_base_anchor: None, } } - pub fn range_in_buffer( - excerpt_id: ExcerptId, - buffer_id: BufferId, - range: Range, - ) -> Range { - Self::in_buffer(excerpt_id, buffer_id, range.start) - ..Self::in_buffer(excerpt_id, buffer_id, range.end) + pub fn range_in_buffer(excerpt_id: ExcerptId, range: Range) -> Range { + Self::in_buffer(excerpt_id, range.start)..Self::in_buffer(excerpt_id, range.end) } pub fn min() -> Self { Self { - buffer_id: None, excerpt_id: ExcerptId::min(), text_anchor: text::Anchor::MIN, diff_base_anchor: None, @@ -81,13 +62,24 @@ impl Anchor { pub fn max() -> Self { Self { - buffer_id: None, excerpt_id: ExcerptId::max(), text_anchor: text::Anchor::MAX, diff_base_anchor: None, } } + pub fn is_min(&self) -> bool { + self.excerpt_id == 
ExcerptId::min() + && self.text_anchor.is_min() + && self.diff_base_anchor.is_none() + } + + pub fn is_max(&self) -> bool { + self.excerpt_id == ExcerptId::max() + && self.text_anchor.is_max() + && self.diff_base_anchor.is_none() + } + pub fn cmp(&self, other: &Anchor, snapshot: &MultiBufferSnapshot) -> Ordering { if self == other { return Ordering::Equal; @@ -101,8 +93,8 @@ impl Anchor { return excerpt_id_cmp; } if self_excerpt_id == ExcerptId::max() - && self.text_anchor == text::Anchor::MAX - && self.text_anchor == text::Anchor::MAX + && self.text_anchor.is_max() + && self.text_anchor.is_max() && self.diff_base_anchor.is_none() && other.diff_base_anchor.is_none() { @@ -147,7 +139,6 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: Some(excerpt.buffer_id), excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_left(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { @@ -171,7 +162,6 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: Some(excerpt.buffer_id), excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_right(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { @@ -202,8 +192,8 @@ impl Anchor { } pub fn is_valid(&self, snapshot: &MultiBufferSnapshot) -> bool { - if *self == Anchor::min() || self.excerpt_id == ExcerptId::max() { - !snapshot.is_empty() + if self.is_min() || self.is_max() { + true } else if let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { (self.text_anchor == excerpt.range.context.start || self.text_anchor == excerpt.range.context.end diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 7922692d30eb3a79e835f5e4b94313c3ea886a7c..2e59eaa621c79bc8d0d0a149704cb55314e9b70d 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -158,12 +158,13 @@ impl MultiBufferDiffHunk { pub fn is_created_file(&self) -> 
bool { self.diff_base_byte_range == (BufferOffset(0)..BufferOffset(0)) - && self.buffer_range == (text::Anchor::MIN..text::Anchor::MAX) + && self.buffer_range.start.is_min() + && self.buffer_range.end.is_max() } pub fn multi_buffer_range(&self) -> Range { - let start = Anchor::in_buffer(self.excerpt_id, self.buffer_id, self.buffer_range.start); - let end = Anchor::in_buffer(self.excerpt_id, self.buffer_id, self.buffer_range.end); + let start = Anchor::in_buffer(self.excerpt_id, self.buffer_range.start); + let end = Anchor::in_buffer(self.excerpt_id, self.buffer_range.end); start..end } } @@ -1028,9 +1029,12 @@ impl MultiBuffer { }, ); this.singleton = true; + let buffer_id = buffer.read(cx).remote_id(); this.push_excerpts( buffer, - [ExcerptRange::new(text::Anchor::MIN..text::Anchor::MAX)], + [ExcerptRange::new(text::Anchor::min_max_range_for_buffer( + buffer_id, + ))], cx, ); this @@ -1912,7 +1916,7 @@ impl MultiBuffer { } pub fn buffer_for_anchor(&self, anchor: Anchor, cx: &App) -> Option> { - if let Some(buffer_id) = anchor.buffer_id { + if let Some(buffer_id) = anchor.text_anchor.buffer_id { self.buffer(buffer_id) } else { let (_, buffer, _) = self.excerpt_containing(anchor, cx)?; @@ -1975,7 +1979,7 @@ impl MultiBuffer { found.map(|(point, excerpt_id)| { let text_anchor = snapshot.anchor_after(point); - Anchor::in_buffer(excerpt_id, snapshot.remote_id(), text_anchor) + Anchor::in_buffer(excerpt_id, text_anchor) }) } @@ -1990,7 +1994,7 @@ impl MultiBuffer { if range.context.start.cmp(&anchor, &snapshot).is_le() && range.context.end.cmp(&anchor, &snapshot).is_ge() { - return Some(Anchor::in_buffer(excerpt_id, snapshot.remote_id(), anchor)); + return Some(Anchor::in_buffer(excerpt_id, anchor)); } } @@ -2112,7 +2116,7 @@ impl MultiBuffer { let mut error = None; let mut futures = Vec::new(); for anchor in anchors { - if let Some(buffer_id) = anchor.buffer_id { + if let Some(buffer_id) = anchor.text_anchor.buffer_id { if let Some(buffer) = 
self.buffers.get(&buffer_id) { buffer.buffer.update(cx, |buffer, _| { futures.push(buffer.wait_for_anchors([anchor.text_anchor])) @@ -2143,7 +2147,11 @@ impl MultiBuffer { ) -> Option<(Entity, language::Anchor)> { let snapshot = self.read(cx); let anchor = snapshot.anchor_before(position); - let buffer = self.buffers.get(&anchor.buffer_id?)?.buffer.clone(); + let buffer = self + .buffers + .get(&anchor.text_anchor.buffer_id?)? + .buffer + .clone(); Some((buffer, anchor.text_anchor)) } @@ -2205,7 +2213,7 @@ impl MultiBuffer { .get(&buffer_id) .is_none_or(|old_diff| !new_diff.base_texts_eq(old_diff)); - snapshot.diffs.insert(buffer_id, new_diff); + snapshot.diffs.insert_or_replace(buffer_id, new_diff); let mut excerpt_edits = Vec::new(); for locator in &buffer_state.excerpts { @@ -2402,7 +2410,11 @@ impl MultiBuffer { pub fn add_diff(&mut self, diff: Entity, cx: &mut Context) { let buffer_id = diff.read(cx).buffer_id; - self.buffer_diff_changed(diff.clone(), text::Anchor::MIN..text::Anchor::MAX, cx); + self.buffer_diff_changed( + diff.clone(), + text::Anchor::min_max_range_for_buffer(buffer_id), + cx, + ); self.diffs.insert(buffer_id, DiffState::new(diff, cx)); } @@ -2500,16 +2512,8 @@ impl MultiBuffer { if last_hunk_row.is_some_and(|row| row >= diff_hunk.row_range.start) { continue; } - let start = Anchor::in_buffer( - diff_hunk.excerpt_id, - diff_hunk.buffer_id, - diff_hunk.buffer_range.start, - ); - let end = Anchor::in_buffer( - diff_hunk.excerpt_id, - diff_hunk.buffer_id, - diff_hunk.buffer_range.end, - ); + let start = Anchor::in_buffer(diff_hunk.excerpt_id, diff_hunk.buffer_range.start); + let end = Anchor::in_buffer(diff_hunk.excerpt_id, diff_hunk.buffer_range.end); let start = snapshot.excerpt_offset_for_anchor(&start); let end = snapshot.excerpt_offset_for_anchor(&end); last_hunk_row = Some(diff_hunk.row_range.start); @@ -3945,9 +3949,7 @@ impl MultiBufferSnapshot { if hunk_end >= current_position { continue; } - let start = - Anchor::in_buffer(excerpt.id, 
excerpt.buffer_id, hunk.buffer_range.start) - .to_point(self); + let start = Anchor::in_buffer(excerpt.id, hunk.buffer_range.start).to_point(self); return Some(MultiBufferRow(start.row)); } } @@ -3964,8 +3966,7 @@ impl MultiBufferSnapshot { let Some(hunk) = hunks.next() else { continue; }; - let start = Anchor::in_buffer(excerpt.id, excerpt.buffer_id, hunk.buffer_range.start) - .to_point(self); + let start = Anchor::in_buffer(excerpt.id, hunk.buffer_range.start).to_point(self); return Some(MultiBufferRow(start.row)); } } @@ -4955,7 +4956,7 @@ impl MultiBufferSnapshot { { text_anchor = excerpt.range.context.end; } - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, text_anchor) + Anchor::in_buffer(excerpt.id, text_anchor) } else if let Some(excerpt) = prev_excerpt { let mut text_anchor = excerpt .range @@ -4968,7 +4969,7 @@ impl MultiBufferSnapshot { { text_anchor = excerpt.range.context.start; } - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, text_anchor) + Anchor::in_buffer(excerpt.id, text_anchor) } else if anchor.text_anchor.bias == Bias::Left { Anchor::min() } else { @@ -5050,7 +5051,7 @@ impl MultiBufferSnapshot { let buffer_start = excerpt.range.context.start.to_offset(&excerpt.buffer); let text_anchor = excerpt.clip_anchor(excerpt.buffer.anchor_at(buffer_start + overshoot, bias)); - let anchor = Anchor::in_buffer(excerpt.id, excerpt.buffer_id, text_anchor); + let anchor = Anchor::in_buffer(excerpt.id, text_anchor); match diff_base_anchor { Some(diff_base_anchor) => anchor.with_diff_base_anchor(diff_base_anchor), None => anchor, @@ -5066,7 +5067,11 @@ impl MultiBufferSnapshot { /// Wraps the [`text::Anchor`] in a [`multi_buffer::Anchor`] if this multi-buffer is a singleton. 
pub fn as_singleton_anchor(&self, text_anchor: text::Anchor) -> Option { let (excerpt, buffer, _) = self.as_singleton()?; - Some(Anchor::in_buffer(*excerpt, buffer, text_anchor)) + if text_anchor.buffer_id.is_none_or(|id| id == buffer) { + Some(Anchor::in_buffer(*excerpt, text_anchor)) + } else { + None + } } /// Returns an anchor for the given excerpt and text anchor, @@ -5099,12 +5104,8 @@ impl MultiBufferSnapshot { match text_anchor.buffer_id { Some(buffer_id) if buffer_id == excerpt.buffer_id => (), Some(_) => return None, - None if text_anchor == text::Anchor::MAX || text_anchor == text::Anchor::MIN => { - return Some(Anchor::in_buffer( - excerpt.id, - excerpt.buffer_id, - text_anchor, - )); + None if text_anchor.is_max() || text_anchor.is_min() => { + return Some(Anchor::in_buffer(excerpt.id, text_anchor)); } None => return None, } @@ -5116,11 +5117,7 @@ impl MultiBufferSnapshot { return None; } - Some(Anchor::in_buffer( - excerpt.id, - excerpt.buffer_id, - text_anchor, - )) + Some(Anchor::in_buffer(excerpt.id, text_anchor)) } pub fn context_range_for_excerpt(&self, excerpt_id: ExcerptId) -> Option> { @@ -5128,7 +5125,7 @@ impl MultiBufferSnapshot { } pub fn can_resolve(&self, anchor: &Anchor) -> bool { - if *anchor == Anchor::min() || anchor.excerpt_id == ExcerptId::max() { + if anchor.is_min() || anchor.is_max() { // todo(lw): should be `!self.is_empty()` true } else if let Some(excerpt) = self.excerpt(anchor.excerpt_id) { @@ -5998,7 +5995,7 @@ impl MultiBufferSnapshot { .. } = self.excerpt(anchor.excerpt_id)?; if cfg!(debug_assertions) { - match anchor.buffer_id { + match anchor.text_anchor.buffer_id { // we clearly are hitting this according to sentry, but in what situations can this occur? 
Some(anchor_buffer_id) => { assert_eq!( @@ -6006,7 +6003,7 @@ impl MultiBufferSnapshot { "anchor {anchor:?} does not match with resolved excerpt {excerpt:?}" ) } - None => assert_eq!(anchor, Anchor::max()), + None => assert!(anchor.is_max()), } }; Some(( @@ -6019,19 +6016,18 @@ impl MultiBufferSnapshot { depth: item.depth, source_range_for_text: Anchor::range_in_buffer( excerpt_id, - buffer_id, item.source_range_for_text, ), - range: Anchor::range_in_buffer(excerpt_id, buffer_id, item.range), + range: Anchor::range_in_buffer(excerpt_id, item.range), text: item.text, highlight_ranges: item.highlight_ranges, name_ranges: item.name_ranges, - body_range: item.body_range.map(|body_range| { - Anchor::range_in_buffer(excerpt_id, buffer_id, body_range) - }), - annotation_range: item.annotation_range.map(|body_range| { - Anchor::range_in_buffer(excerpt_id, buffer_id, body_range) - }), + body_range: item + .body_range + .map(|body_range| Anchor::range_in_buffer(excerpt_id, body_range)), + annotation_range: item + .annotation_range + .map(|body_range| Anchor::range_in_buffer(excerpt_id, body_range)), }) }) .collect(), @@ -6180,7 +6176,7 @@ impl MultiBufferSnapshot { } pub fn buffer_id_for_anchor(&self, anchor: Anchor) -> Option { - if let Some(id) = anchor.buffer_id { + if let Some(id) = anchor.text_anchor.buffer_id { return Some(id); } let excerpt = self.excerpt_containing(anchor..anchor)?; @@ -6212,10 +6208,8 @@ impl MultiBufferSnapshot { .selections_in_range(query_range, include_local) .flat_map(move |(replica_id, line_mode, cursor_shape, selections)| { selections.map(move |selection| { - let mut start = - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, selection.start); - let mut end = - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, selection.end); + let mut start = Anchor::in_buffer(excerpt.id, selection.start); + let mut end = Anchor::in_buffer(excerpt.id, selection.end); if range.start.cmp(&start, self).is_gt() { start = range.start; } @@ -6687,7 +6681,8 @@ impl 
Excerpt { } fn contains(&self, anchor: &Anchor) -> bool { - (anchor.buffer_id == None || anchor.buffer_id == Some(self.buffer_id)) + (anchor.text_anchor.buffer_id == None + || anchor.text_anchor.buffer_id == Some(self.buffer_id)) && self .range .context @@ -6723,19 +6718,11 @@ impl<'a> MultiBufferExcerpt<'a> { } pub fn start_anchor(&self) -> Anchor { - Anchor::in_buffer( - self.excerpt.id, - self.excerpt.buffer_id, - self.excerpt.range.context.start, - ) + Anchor::in_buffer(self.excerpt.id, self.excerpt.range.context.start) } pub fn end_anchor(&self) -> Anchor { - Anchor::in_buffer( - self.excerpt.id, - self.excerpt.buffer_id, - self.excerpt.range.context.end, - ) + Anchor::in_buffer(self.excerpt.id, self.excerpt.range.context.end) } pub fn buffer(&self) -> &'a BufferSnapshot { diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 9517f1f76ece2f34aa5c95eb27b408e1ef004b99..e95d222c651999645a6966195be2da31347f1409 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -3401,14 +3401,11 @@ fn test_summaries_for_anchors(cx: &mut TestAppContext) { ), ); - let id_1 = buffer_1.read_with(cx, |buffer, _| buffer.remote_id()); - let id_2 = buffer_2.read_with(cx, |buffer, _| buffer.remote_id()); - - let anchor_1 = Anchor::in_buffer(ids[0], id_1, text::Anchor::MIN); + let anchor_1 = Anchor::in_buffer(ids[0], text::Anchor::MIN); let point_1 = snapshot.summaries_for_anchors::([&anchor_1])[0]; assert_eq!(point_1, Point::new(0, 0)); - let anchor_2 = Anchor::in_buffer(ids[1], id_2, text::Anchor::MIN); + let anchor_2 = Anchor::in_buffer(ids[1], text::Anchor::MIN); let point_2 = snapshot.summaries_for_anchors::([&anchor_2])[0]; assert_eq!(point_2, Point::new(3, 0)); } diff --git a/crates/multi_buffer/src/path_key.rs b/crates/multi_buffer/src/path_key.rs index 530bb4aa6435fb9a3aa768d84a2bbcf829eb72c6..5d9b653a2b8c9df8c854ca01c47c57b42c159f1e 100644 --- 
a/crates/multi_buffer/src/path_key.rs +++ b/crates/multi_buffer/src/path_key.rs @@ -56,11 +56,7 @@ impl MultiBuffer { let excerpt_id = self.excerpts_by_path.get(path)?.first()?; let snapshot = self.read(cx); let excerpt = snapshot.excerpt(*excerpt_id)?; - Some(Anchor::in_buffer( - excerpt.id, - excerpt.buffer_id, - excerpt.range.context.start, - )) + Some(Anchor::in_buffer(excerpt.id, excerpt.range.context.start)) } pub fn excerpt_paths(&self) -> impl Iterator { @@ -263,7 +259,6 @@ impl MultiBuffer { for range in ranges.by_ref().take(range_count) { let range = Anchor::range_in_buffer( excerpt_id, - buffer_snapshot.remote_id(), buffer_snapshot.anchor_before(&range.primary.start) ..buffer_snapshot.anchor_after(&range.primary.end), ); diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index 36cd9d076bb428f37c898a142fa7f3d1da887918..cb857a72898bbd6f4161a0f4d218394efeab5c7e 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -2044,8 +2044,9 @@ impl OutlinePanel { PanelEntry::Fs(FsEntry::ExternalFile(..)) => None, PanelEntry::Search(SearchEntry { match_range, .. 
}) => match_range .start + .text_anchor .buffer_id - .or(match_range.end.buffer_id) + .or(match_range.end.text_anchor.buffer_id) .map(|buffer_id| { outline_panel.update(cx, |outline_panel, cx| { outline_panel diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index f5d931737dff9a873fc5d63e5445b2b5d49bab56..4f7022a264db18f96150c369fadb957556e33b75 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -2746,7 +2746,7 @@ impl LocalLspStore { let actions = lsp_store .update(cx, move |this, cx| { let request = GetCodeActions { - range: text::Anchor::MIN..text::Anchor::MAX, + range: text::Anchor::min_max_range_for_buffer(buffer.read(cx).remote_id()), kinds: Some(code_action_kinds), }; let server = LanguageServerToQuery::Other(language_server_id); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 149d30a5283c13f71477fc6776d5ca7f61f6205d..beebf5a1d133eb75fdd98184ddf7880b9cedc7e0 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4002,7 +4002,8 @@ impl Project { ) -> Task>> { let snapshot = buffer_handle.read(cx).snapshot(); - let captures = snapshot.debug_variables_query(Anchor::MIN..range.end); + let captures = + snapshot.debug_variables_query(Anchor::min_for_buffer(snapshot.remote_id())..range.end); let row = snapshot .summary_for_anchor::(&range.end) diff --git a/crates/sum_tree/src/tree_map.rs b/crates/sum_tree/src/tree_map.rs index 3e56194dddd9910f819e91c209f6701b55efdd02..e58f7a65dd5d13ca67d4433bd25118ffb55d1169 100644 --- a/crates/sum_tree/src/tree_map.rs +++ b/crates/sum_tree/src/tree_map.rs @@ -72,6 +72,12 @@ impl TreeMap { self.0.insert_or_replace(MapEntry { key, value }, ()); } + pub fn insert_or_replace(&mut self, key: K, value: V) -> Option { + self.0 + .insert_or_replace(MapEntry { key, value }, ()) + .map(|it| it.value) + } + pub fn extend(&mut self, iter: impl IntoIterator) { let edits: Vec<_> = iter .into_iter() diff --git 
a/crates/text/src/anchor.rs b/crates/text/src/anchor.rs index 63a9ff6f1863041594fba7ebea0b3feaba6b8db7..c6d47a1e233b2fdf58fbc73adb622fc801832335 100644 --- a/crates/text/src/anchor.rs +++ b/crates/text/src/anchor.rs @@ -18,11 +18,11 @@ pub struct Anchor { impl Debug for Anchor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if *self == Self::MIN { - return f.write_str("Anchor::MIN"); + if self.is_min() { + return write!(f, "Anchor::min({:?})", self.buffer_id); } - if *self == Self::MAX { - return f.write_str("Anchor::MAX"); + if self.is_max() { + return write!(f, "Anchor::max({:?})", self.buffer_id); } f.debug_struct("Anchor") @@ -49,6 +49,36 @@ impl Anchor { buffer_id: None, }; + pub fn min_for_buffer(buffer_id: BufferId) -> Self { + Self { + timestamp: clock::Lamport::MIN, + offset: usize::MIN, + bias: Bias::Left, + buffer_id: Some(buffer_id), + } + } + + pub fn max_for_buffer(buffer_id: BufferId) -> Self { + Self { + timestamp: clock::Lamport::MAX, + offset: usize::MAX, + bias: Bias::Right, + buffer_id: Some(buffer_id), + } + } + + pub fn min_min_range_for_buffer(buffer_id: BufferId) -> std::ops::Range { + let min = Self::min_for_buffer(buffer_id); + min..min + } + pub fn max_max_range_for_buffer(buffer_id: BufferId) -> std::ops::Range { + let max = Self::max_for_buffer(buffer_id); + max..max + } + pub fn min_max_range_for_buffer(buffer_id: BufferId) -> std::ops::Range { + Self::min_for_buffer(buffer_id)..Self::max_for_buffer(buffer_id) + } + pub fn cmp(&self, other: &Anchor, buffer: &BufferSnapshot) -> Ordering { let fragment_id_comparison = if self.timestamp == other.timestamp { Ordering::Equal @@ -109,7 +139,7 @@ impl Anchor { /// Returns true when the [`Anchor`] is located inside a visible fragment. 
pub fn is_valid(&self, buffer: &BufferSnapshot) -> bool { - if *self == Anchor::MIN || *self == Anchor::MAX { + if self.is_min() || self.is_max() { true } else if self.buffer_id.is_none_or(|id| id != buffer.remote_id) { false @@ -127,6 +157,18 @@ impl Anchor { item.is_some_and(|fragment| fragment.visible) } } + + pub fn is_min(&self) -> bool { + self.timestamp == clock::Lamport::MIN + && self.offset == usize::MIN + && self.bias == Bias::Left + } + + pub fn is_max(&self) -> bool { + self.timestamp == clock::Lamport::MAX + && self.offset == usize::MAX + && self.bias == Bias::Right + } } pub trait OffsetRangeExt { diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index 5f87e5441d2bb97863b0086ac273e4d4d8acfdc9..c16c6a7b27e2b1fc4c945007395dbe26f98adcda 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -1652,10 +1652,7 @@ impl Buffer { ) -> impl 'static + Future> + use { let mut futures = Vec::new(); for anchor in anchors { - if !self.version.observed(anchor.timestamp) - && anchor != Anchor::MAX - && anchor != Anchor::MIN - { + if !self.version.observed(anchor.timestamp) && !anchor.is_max() && !anchor.is_min() { let (tx, rx) = oneshot::channel(); self.edit_id_resolvers .entry(anchor.timestamp) @@ -2258,9 +2255,9 @@ impl BufferSnapshot { let mut position = D::zero(()); anchors.map(move |(anchor, payload)| { - if *anchor == Anchor::MIN { + if anchor.is_min() { return (D::zero(()), payload); - } else if *anchor == Anchor::MAX { + } else if anchor.is_max() { return (D::from_text_summary(&self.visible_text.summary()), payload); } @@ -2318,9 +2315,9 @@ impl BufferSnapshot { } pub fn offset_for_anchor(&self, anchor: &Anchor) -> usize { - if *anchor == Anchor::MIN { + if anchor.is_min() { 0 - } else if *anchor == Anchor::MAX { + } else if anchor.is_max() { self.visible_text.len() } else { debug_assert!(anchor.buffer_id == Some(self.remote_id)); @@ -2393,9 +2390,9 @@ impl BufferSnapshot { } fn try_fragment_id_for_anchor(&self, anchor: &Anchor) -> 
Option<&Locator> { - if *anchor == Anchor::MIN { + if anchor.is_min() { Some(Locator::min_ref()) - } else if *anchor == Anchor::MAX { + } else if anchor.is_max() { Some(Locator::max_ref()) } else { let anchor_key = InsertionFragmentKey { @@ -2440,9 +2437,9 @@ impl BufferSnapshot { fn anchor_at_offset(&self, offset: usize, bias: Bias) -> Anchor { if bias == Bias::Left && offset == 0 { - Anchor::MIN + Anchor::min_for_buffer(self.remote_id) } else if bias == Bias::Right && offset == self.len() { - Anchor::MAX + Anchor::max_for_buffer(self.remote_id) } else { if cfg!(debug_assertions) { self.visible_text.assert_char_boundary(offset); @@ -2462,8 +2459,8 @@ impl BufferSnapshot { } pub fn can_resolve(&self, anchor: &Anchor) -> bool { - *anchor == Anchor::MIN - || *anchor == Anchor::MAX + anchor.is_min() + || anchor.is_max() || (Some(self.remote_id) == anchor.buffer_id && self.version.observed(anchor.timestamp)) } diff --git a/crates/vim/src/motion.rs b/crates/vim/src/motion.rs index b0faa7bb068135a3feafc507e4f8a6ed97863e8c..dc108b0957d993b2229e8c04fed5923e9de250d4 100644 --- a/crates/vim/src/motion.rs +++ b/crates/vim/src/motion.rs @@ -2268,17 +2268,13 @@ fn go_to_line(map: &DisplaySnapshot, display_point: DisplayPoint, line: usize) - ..language::ToOffset::to_offset(&range.context.end, buffer); if offset >= excerpt_range.start && offset <= excerpt_range.end { let text_anchor = buffer.anchor_after(offset); - let anchor = Anchor::in_buffer(excerpt, buffer.remote_id(), text_anchor); + let anchor = Anchor::in_buffer(excerpt, text_anchor); return anchor.to_display_point(map); } else if offset <= excerpt_range.start { - let anchor = Anchor::in_buffer(excerpt, buffer.remote_id(), range.context.start); + let anchor = Anchor::in_buffer(excerpt, range.context.start); return anchor.to_display_point(map); } else { - last_position = Some(Anchor::in_buffer( - excerpt, - buffer.remote_id(), - range.context.end, - )); + last_position = Some(Anchor::in_buffer(excerpt, range.context.end)); 
} } diff --git a/crates/vim/src/state.rs b/crates/vim/src/state.rs index eba4476ea878932518dc8a3951e04f4c6ea96d29..e96fd3a329e95311eeb73b87b53acbe76939f0cd 100644 --- a/crates/vim/src/state.rs +++ b/crates/vim/src/state.rs @@ -606,7 +606,7 @@ impl MarksState { let text_anchors = anchors.get(name)?; let anchors = text_anchors .iter() - .map(|anchor| Anchor::in_buffer(excerpt_id, buffer_id, *anchor)) + .map(|anchor| Anchor::in_buffer(excerpt_id, *anchor)) .collect(); return Some(Mark::Local(anchors)); } From 552bc027838e6b6bd60a60627878ce1eabbfc990 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 25 Nov 2025 17:26:44 +0100 Subject: [PATCH 025/749] git: Bring back auto-commit suggestions (#43470) This got accidentally regressed in https://github.com/zed-industries/zed/pull/42149. Release Notes: - Fixed displaying auto-commit suggestions for single staged entries. --- crates/git_ui/src/git_panel.rs | 218 +++++++++++++++++++++++++++++++-- 1 file changed, 205 insertions(+), 13 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index a6c6113a33b61cd16f007b6d2d818e42ad2a191e..4a5cd56ec90fd95fe94d55edfdeb7e2114fea820 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -2868,20 +2868,19 @@ impl GitPanel { if ops.staged() { self.single_staged_entry = single_staged_entry; } + } else { + self.single_staged_entry = single_staged_entry; } - } else if repo.pending_ops_summary().item_summary.staging_count == 1 { - self.single_staged_entry = repo.pending_ops().find_map(|ops| { - if ops.staging() { - repo.status_for_path(&ops.repo_path) - .map(|status| GitStatusEntry { - repo_path: ops.repo_path.clone(), - status: status.status, - staging: StageStatus::Staged, - }) - } else { - None - } - }); + } else if repo.pending_ops_summary().item_summary.staging_count == 1 + && let Some(ops) = repo.pending_ops().find(|ops| ops.staging()) + { + self.single_staged_entry = + repo.status_for_path(&ops.repo_path) + 
.map(|status| GitStatusEntry { + repo_path: ops.repo_path.clone(), + status: status.status, + staging: StageStatus::Staged, + }); } } @@ -5942,4 +5941,197 @@ mod tests { "}; assert_eq!(result, expected); } + + #[gpui::test] + async fn test_suggest_commit_message(cx: &mut TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.background_executor.clone()); + fs.insert_tree( + path!("/project"), + json!({ + ".git": {}, + "tracked": "tracked\n", + "untracked": "\n", + }), + ) + .await; + + fs.set_head_and_index_for_repo( + path!("/project/.git").as_ref(), + &[("tracked", "old tracked\n".into())], + ); + + let project = Project::test(fs.clone(), [Path::new(path!("/project"))], cx).await; + let workspace = + cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, GitPanel::new).unwrap(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + let entries = panel.read_with(cx, |panel, _| panel.entries.clone()); + + // GitPanel + // - Tracked: + // - [] tracked + // - Untracked + // - [] untracked + // + // The commit message should now read: + // "Update tracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Update tracked".to_string())); + + let first_status_entry = entries[1].clone(); + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&first_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, 
Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [x] tracked + // - Untracked + // - [] untracked + // + // The commit message should still read: + // "Update tracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Update tracked".to_string())); + + let second_status_entry = entries[3].clone(); + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&second_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [x] tracked + // - Untracked + // - [x] untracked + // + // The commit message should now read: + // "Enter commit message" + // (which means we should see None returned). 
+ let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert!(message.is_none()); + + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&first_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [] tracked + // - Untracked + // - [x] untracked + // + // The commit message should now read: + // "Update untracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Create untracked".to_string())); + + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&second_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [] tracked + // - Untracked + // - [] untracked + // + // The commit message should now read: + // "Update tracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Update tracked".to_string())); + } } From 2053fea0a7cebdd160c55e02e13cb9624b9f4efa Mon Sep 17 00:00:00 2001 From: "Joseph T. 
Lyons" Date: Tue, 25 Nov 2025 11:28:33 -0500 Subject: [PATCH 026/749] Add collaboration redirects (#43471) Redirect: https://zed.dev/docs/collaboration -> https://zed.dev/docs/collaboration/overview https://zed.dev/docs/channels -> https://zed.dev/docs/collaboration/channels Release Notes: - N/A --- docs/book.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/book.toml b/docs/book.toml index 60ddc5ac515cb73f7b0b4f2f8c2c193bdddf228b..2bb57c5c08ea2421aa9b8a2fb47fdc9521d32a39 100644 --- a/docs/book.toml +++ b/docs/book.toml @@ -56,6 +56,10 @@ enable = false "/model-improvement.html" = "/docs/ai/ai-improvement.html" "/ai/temperature.html" = "/docs/ai/agent-settings.html#model-temperature" +# Collaboration +"/channels.html" = "/docs/collaboration/channels.html" +"/collaboration.html" = "/docs/collaboration/overview.html" + # Community "/community/feedback.html" = "/community-links" "/conversations.html" = "/community-links" From 8a992703a740caf440586eccb231575f7aa90727 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 25 Nov 2025 12:15:55 -0500 Subject: [PATCH 027/749] Add Gemini 3 support to Copilot (#43096) Closes #43024 Release Notes: - Add support for Gemini 3 to Copilot --- crates/copilot/src/copilot_chat.rs | 20 ++- crates/copilot/src/copilot_responses.rs | 3 +- .../src/provider/copilot_chat.rs | 156 ++++++++++++++++++ 3 files changed, 171 insertions(+), 8 deletions(-) diff --git a/crates/copilot/src/copilot_chat.rs b/crates/copilot/src/copilot_chat.rs index d4051701f72331bf5fc25fcd634002f0206ba529..52a3631791ecaf4e1f7b2bc935be37816f2b25de 100644 --- a/crates/copilot/src/copilot_chat.rs +++ b/crates/copilot/src/copilot_chat.rs @@ -294,6 +294,10 @@ pub enum ChatMessage { content: ChatMessageContent, #[serde(default, skip_serializing_if = "Vec::is_empty")] tool_calls: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + reasoning_opaque: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + reasoning_text: Option, 
}, User { content: ChatMessageContent, @@ -386,6 +390,8 @@ pub struct ResponseDelta { pub role: Option, #[serde(default)] pub tool_calls: Vec, + pub reasoning_opaque: Option, + pub reasoning_text: Option, } #[derive(Deserialize, Debug, Eq, PartialEq)] pub struct ToolCallChunk { @@ -786,13 +792,13 @@ async fn stream_completion( is_user_initiated: bool, ) -> Result>> { let is_vision_request = request.messages.iter().any(|message| match message { - ChatMessage::User { content } - | ChatMessage::Assistant { content, .. } - | ChatMessage::Tool { content, .. } => { - matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. }))) - } - _ => false, - }); + ChatMessage::User { content } + | ChatMessage::Assistant { content, .. } + | ChatMessage::Tool { content, .. } => { + matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. }))) + } + _ => false, + }); let request_initiator = if is_user_initiated { "user" } else { "agent" }; diff --git a/crates/copilot/src/copilot_responses.rs b/crates/copilot/src/copilot_responses.rs index 938577e224bcf4af440c3bd646cd1910ec1fbd13..2da2eb394b5fc5ba88c8dd3007df394a2dbc15bf 100644 --- a/crates/copilot/src/copilot_responses.rs +++ b/crates/copilot/src/copilot_responses.rs @@ -313,7 +313,8 @@ pub async fn stream_response( }; let is_streaming = request.stream; - let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?; + let json = serde_json::to_string(&request)?; + let request = request_builder.body(AsyncBody::from(json))?; let mut response = client.send(request).await?; if !response.status().is_success() { diff --git a/crates/language_models/src/provider/copilot_chat.rs b/crates/language_models/src/provider/copilot_chat.rs index 1d0410c0cfff5f0f757120c9b91432593c8c1053..92ac342a39ff04ae42f5b01b5777a5d16563c37f 100644 --- a/crates/language_models/src/provider/copilot_chat.rs 
+++ b/crates/language_models/src/provider/copilot_chat.rs @@ -367,12 +367,16 @@ pub fn map_to_language_model_completion_events( struct State { events: Pin>>>, tool_calls_by_index: HashMap, + reasoning_opaque: Option, + reasoning_text: Option, } futures::stream::unfold( State { events, tool_calls_by_index: HashMap::default(), + reasoning_opaque: None, + reasoning_text: None, }, move |mut state| async move { if let Some(event) = state.events.next().await { @@ -403,6 +407,14 @@ pub fn map_to_language_model_completion_events( events.push(Ok(LanguageModelCompletionEvent::Text(content))); } + // Capture reasoning data from the delta (e.g. for Gemini 3) + if let Some(opaque) = delta.reasoning_opaque.clone() { + state.reasoning_opaque = Some(opaque); + } + if let Some(text) = delta.reasoning_text.clone() { + state.reasoning_text = Some(text); + } + for (index, tool_call) in delta.tool_calls.iter().enumerate() { let tool_index = tool_call.index.unwrap_or(index); let entry = state.tool_calls_by_index.entry(tool_index).or_default(); @@ -445,6 +457,32 @@ pub fn map_to_language_model_completion_events( ))); } Some("tool_calls") => { + // Gemini 3 models send reasoning_opaque/reasoning_text that must + // be preserved and sent back in subsequent requests. Emit as + // ReasoningDetails so the agent stores it in the message. 
+ if state.reasoning_opaque.is_some() + || state.reasoning_text.is_some() + { + let mut details = serde_json::Map::new(); + if let Some(opaque) = state.reasoning_opaque.take() { + details.insert( + "reasoning_opaque".to_string(), + serde_json::Value::String(opaque), + ); + } + if let Some(text) = state.reasoning_text.take() { + details.insert( + "reasoning_text".to_string(), + serde_json::Value::String(text), + ); + } + events.push(Ok( + LanguageModelCompletionEvent::ReasoningDetails( + serde_json::Value::Object(details), + ), + )); + } + events.extend(state.tool_calls_by_index.drain().map( |(_, tool_call)| { // The model can output an empty string @@ -807,6 +845,22 @@ fn into_copilot_chat( buffer }; + // Extract reasoning_opaque and reasoning_text from reasoning_details + let (reasoning_opaque, reasoning_text) = + if let Some(details) = &message.reasoning_details { + let opaque = details + .get("reasoning_opaque") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + let text = details + .get("reasoning_text") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + (opaque, text) + } else { + (None, None) + }; + messages.push(ChatMessage::Assistant { content: if text_content.is_empty() { ChatMessageContent::empty() @@ -814,6 +868,8 @@ fn into_copilot_chat( text_content.into() }, tool_calls, + reasoning_opaque, + reasoning_text, }); } Role::System => messages.push(ChatMessage::System { @@ -1317,6 +1373,106 @@ mod tests { other => panic!("expected HttpResponseError, got {:?}", other), } } + + #[test] + fn chat_completions_stream_maps_reasoning_data() { + use copilot::copilot_chat::ResponseEvent; + + let events = vec![ + ResponseEvent { + choices: vec![copilot::copilot_chat::ResponseChoice { + index: Some(0), + finish_reason: None, + delta: Some(copilot::copilot_chat::ResponseDelta { + content: None, + role: Some(copilot::copilot_chat::Role::Assistant), + tool_calls: vec![copilot::copilot_chat::ToolCallChunk { + index: Some(0), + id: 
Some("call_abc123".to_string()), + function: Some(copilot::copilot_chat::FunctionChunk { + name: Some("list_directory".to_string()), + arguments: Some("{\"path\":\"test\"}".to_string()), + thought_signature: None, + }), + }], + reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()), + reasoning_text: Some("Let me check the directory".to_string()), + }), + message: None, + }], + id: "chatcmpl-123".to_string(), + usage: None, + }, + ResponseEvent { + choices: vec![copilot::copilot_chat::ResponseChoice { + index: Some(0), + finish_reason: Some("tool_calls".to_string()), + delta: Some(copilot::copilot_chat::ResponseDelta { + content: None, + role: None, + tool_calls: vec![], + reasoning_opaque: None, + reasoning_text: None, + }), + message: None, + }], + id: "chatcmpl-123".to_string(), + usage: None, + }, + ]; + + let mapped = futures::executor::block_on(async { + map_to_language_model_completion_events( + Box::pin(futures::stream::iter(events.into_iter().map(Ok))), + true, + ) + .collect::>() + .await + }); + + let mut has_reasoning_details = false; + let mut has_tool_use = false; + let mut reasoning_opaque_value: Option = None; + let mut reasoning_text_value: Option = None; + + for event_result in mapped { + match event_result { + Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => { + has_reasoning_details = true; + reasoning_opaque_value = details + .get("reasoning_opaque") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + reasoning_text_value = details + .get("reasoning_text") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + } + Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => { + has_tool_use = true; + assert_eq!(tool_use.id.to_string(), "call_abc123"); + assert_eq!(tool_use.name.as_ref(), "list_directory"); + } + _ => {} + } + } + + assert!( + has_reasoning_details, + "Should emit ReasoningDetails event for Gemini 3 reasoning" + ); + assert!(has_tool_use, "Should emit ToolUse event"); + assert_eq!( + 
reasoning_opaque_value, + Some("encrypted_reasoning_token_xyz".to_string()), + "Should capture reasoning_opaque" + ); + assert_eq!( + reasoning_text_value, + Some("Let me check the directory".to_string()), + "Should capture reasoning_text" + ); + } } struct ConfigurationView { copilot_status: Option, From 1c072017a468d04b500da857bf4245832b0f8bf6 Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Tue, 25 Nov 2025 12:29:26 -0500 Subject: [PATCH 028/749] git: Make the version_control.{deleted/added} colors more accessible (#43475) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The new colors are easier to tell apart for people that are colorblind cc: @mattermill ## One Dark ### Before Screenshot 2025-11-25 at 12 13 14 PM ### After Screenshot 2025-11-25 at 12 14 16 PM ## One Light ### Before Screenshot 2025-11-25 at 12 15 13 PM ### After Screenshot 2025-11-25 at 12 15 45 PM Release Notes: - N/A --- assets/themes/one/one.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/themes/one/one.json b/assets/themes/one/one.json index 6849cd05dc70752216789ae04e81fad232f7b14b..48db749a4b636963d6db714ddb055c9c15bc5494 100644 --- a/assets/themes/one/one.json +++ b/assets/themes/one/one.json @@ -96,9 +96,9 @@ "terminal.ansi.bright_white": "#fafafaff", "terminal.ansi.dim_white": "#575d65ff", "link_text.hover": "#74ade8ff", - "version_control.added": "#27a657ff", + "version_control.added": "#2EA048ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#e06c76ff", + "version_control.deleted": "#78081Bff", "version_control.conflict_marker.ours": "#a1c1811a", "version_control.conflict_marker.theirs": "#74ade81a", "conflict": "#dec184ff", @@ -497,9 +497,9 @@ "terminal.ansi.bright_white": "#ffffffff", "terminal.ansi.dim_white": "#aaaaaaff", "link_text.hover": "#5c78e2ff", - "version_control.added": "#27a657ff", + "version_control.added": 
"#2EA048ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#e06c76ff", + "version_control.deleted": "#F85149ff", "conflict": "#a48819ff", "conflict.background": "#faf2e6ff", "conflict.border": "#f4e7d1ff", From 94f9b8585969da841cccb341ad766c1d2d852816 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Tue, 25 Nov 2025 18:29:45 +0100 Subject: [PATCH 029/749] acp: Only pass enabled MCP servers to agent (#43467) Release Notes: - Fix an issue where ACP agents would start MCP servers that were disabled in Zed --- crates/project/src/context_server_store.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/project/src/context_server_store.rs b/crates/project/src/context_server_store.rs index 342a59ab7d5530e8f2268f1c4b72ea44f302f807..59bef36f06502f11d06f76ac7819a4c9ea806176 100644 --- a/crates/project/src/context_server_store.rs +++ b/crates/project/src/context_server_store.rs @@ -199,12 +199,12 @@ impl ContextServerStore { ) } - /// Returns all configured context server ids, regardless of enabled state. + /// Returns all configured context server ids, excluding the ones that are disabled pub fn configured_server_ids(&self) -> Vec { self.context_server_settings - .keys() - .cloned() - .map(ContextServerId) + .iter() + .filter(|(_, settings)| settings.enabled()) + .map(|(id, _)| ContextServerId(id.clone())) .collect() } From 388fda2292ccbe381c18473030461420f06c36cd Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Tue, 25 Nov 2025 23:27:11 +0530 Subject: [PATCH 030/749] editor: Fix package version completion partial accept and improve sorting (#43473) Closes #41723 This PR fixes an issue with accepting partial semver completions by including `.` in the completion query. This makes the editor treat the entire version string as the query, instead of breaking segment at last `.` . This PR also adds a test for sorting semver completions. 
The actual sorting fix is handled in the `package-version-server` by having it provide `sort_text`. More: https://github.com/zed-industries/package-version-server/pull/10 image Release Notes: - Fixed an issue where accepting a completion for a semver version in package.json would append the suggestion to the existing text instead of replacing it. - Improved the sorting of semver completions in package.json so the latest versions appear at the top. --- crates/editor/src/code_completion_tests.rs | 122 +++++++++++++++++++-- crates/languages/src/json/config.toml | 3 +- 2 files changed, 117 insertions(+), 8 deletions(-) diff --git a/crates/editor/src/code_completion_tests.rs b/crates/editor/src/code_completion_tests.rs index 364b310f367ff195f9aee8693a815be94db0b44d..4602824486ebb88f78ed529abb91ddcc1c34646f 100644 --- a/crates/editor/src/code_completion_tests.rs +++ b/crates/editor/src/code_completion_tests.rs @@ -239,6 +239,89 @@ async fn test_fuzzy_over_sort_positions(cx: &mut TestAppContext) { assert_eq!(matches[2].string, "fetch_code_lens"); } +#[gpui::test] +async fn test_semver_label_sort_by_latest_version(cx: &mut TestAppContext) { + let mut versions = [ + "10.4.112", + "10.4.22", + "10.4.2", + "10.4.20", + "10.4.21", + "10.4.12", + // Pre-release versions + "10.4.22-alpha", + "10.4.22-beta.1", + "10.4.22-rc.1", + // Build metadata versions + "10.4.21+build.123", + "10.4.20+20210327", + ]; + versions.sort_by(|a, b| { + match ( + semver::Version::parse(a).ok(), + semver::Version::parse(b).ok(), + ) { + (Some(a_ver), Some(b_ver)) => b_ver.cmp(&a_ver), + _ => std::cmp::Ordering::Equal, + } + }); + let completions: Vec<_> = versions + .iter() + .enumerate() + .map(|(i, version)| { + // This sort text would come from the LSP + let sort_text = format!("{:08}", i); + CompletionBuilder::new(version, None, &sort_text, None) + }) + .collect(); + + // Case 1: User types just the major and minor version + let matches = + filter_and_sort_matches("10.4.", &completions, 
SnippetSortOrder::default(), cx).await; + // Versions are ordered by recency (latest first) + let expected_versions = [ + "10.4.112", + "10.4.22", + "10.4.22-rc.1", + "10.4.22-beta.1", + "10.4.22-alpha", + "10.4.21+build.123", + "10.4.21", + "10.4.20+20210327", + "10.4.20", + "10.4.12", + "10.4.2", + ]; + for (match_item, expected) in matches.iter().zip(expected_versions.iter()) { + assert_eq!(match_item.string.as_ref() as &str, *expected); + } + + // Case 2: User types the major, minor, and patch version + let matches = + filter_and_sort_matches("10.4.2", &completions, SnippetSortOrder::default(), cx).await; + let expected_versions = [ + // Exact match comes first + "10.4.2", + // Ordered by recency with exact major, minor, and patch versions + "10.4.22", + "10.4.22-rc.1", + "10.4.22-beta.1", + "10.4.22-alpha", + "10.4.21+build.123", + "10.4.21", + "10.4.20+20210327", + "10.4.20", + // Versions with non-exact patch versions are ordered by fuzzy score + // Higher fuzzy score than 112 patch version since "2" appears before "1" + // in "12", making it rank higher than "112" + "10.4.12", + "10.4.112", + ]; + for (match_item, expected) in matches.iter().zip(expected_versions.iter()) { + assert_eq!(match_item.string.as_ref() as &str, *expected); + } +} + async fn test_for_each_prefix( target: &str, completions: &Vec, @@ -259,30 +342,55 @@ struct CompletionBuilder; impl CompletionBuilder { fn constant(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::CONSTANT) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::CONSTANT), + ) } fn function(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::FUNCTION) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::FUNCTION), + ) } fn method(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - 
Self::new(label, filter_text, sort_text, CompletionItemKind::METHOD) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::METHOD), + ) } fn variable(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::VARIABLE) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::VARIABLE), + ) } fn snippet(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::SNIPPET) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::SNIPPET), + ) } fn new( label: &str, filter_text: Option<&str>, sort_text: &str, - kind: CompletionItemKind, + kind: Option, ) -> Completion { Completion { replace_range: Anchor::MIN..Anchor::MAX, @@ -294,7 +402,7 @@ impl CompletionBuilder { server_id: LanguageServerId(0), lsp_completion: Box::new(CompletionItem { label: label.to_string(), - kind: Some(kind), + kind: kind, sort_text: Some(sort_text.to_string()), filter_text: filter_text.map(|text| text.to_string()), ..Default::default() diff --git a/crates/languages/src/json/config.toml b/crates/languages/src/json/config.toml index 1cf815704cd1d0ecd25c25e00d5925c13ff0cf35..8caa46c8a45076557d5f6c897fc1a5ad11ffa6ac 100644 --- a/crates/languages/src/json/config.toml +++ b/crates/languages/src/json/config.toml @@ -11,5 +11,6 @@ brackets = [ tab_size = 2 prettier_parser_name = "json" debuggers = ["JavaScript"] + [overrides.string] -completion_query_characters = [":", " "] +completion_query_characters = [":", " ", "."] From 36708c910a0b4f20a85338d423f9710b0cc27780 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 25 Nov 2025 10:36:45 -0800 Subject: [PATCH 031/749] Separate experimental edit prediction jumps feature from the Sweep AI prediction provider (#43481) Release Notes: - N/A --------- Co-authored-by: Ben Kunkle --- crates/zeta/src/provider.rs | 2 +- 
crates/zeta/src/sweep_ai.rs | 283 ++++++++++++++++++++++- crates/zeta/src/zeta.rs | 320 ++++++-------------------- crates/zeta/src/zeta1.rs | 6 +- crates/zeta2_tools/src/zeta2_tools.rs | 79 ++++--- 5 files changed, 381 insertions(+), 309 deletions(-) diff --git a/crates/zeta/src/provider.rs b/crates/zeta/src/provider.rs index a2b3eed1b5efe953ebdf5a2448ca06e7866bea86..76c950714afa808ea04cf5fead89979374f2b99b 100644 --- a/crates/zeta/src/provider.rs +++ b/crates/zeta/src/provider.rs @@ -77,7 +77,7 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { ) -> bool { let zeta = self.zeta.read(cx); if zeta.edit_prediction_model == ZetaEditPredictionModel::Sweep { - zeta.sweep_api_token.is_some() + zeta.sweep_ai.api_token.is_some() } else { true } diff --git a/crates/zeta/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs index 0e226ab9df26ffc945a2d8e810790d0b00d0f198..49870c7c9e917468e70062cbc234e9774fb3668b 100644 --- a/crates/zeta/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -1,10 +1,269 @@ -use std::fmt; -use std::{path::Path, sync::Arc}; - +use anyhow::{Context as _, Result}; +use cloud_llm_client::predict_edits_v3::Event; +use futures::AsyncReadExt as _; +use gpui::{ + App, AppContext as _, Entity, Task, + http_client::{self, AsyncBody, Method}, +}; +use language::{Buffer, BufferSnapshot, Point, ToOffset as _, ToPoint as _}; +use lsp::DiagnosticSeverity; +use project::{Project, ProjectPath}; use serde::{Deserialize, Serialize}; +use std::{ + collections::VecDeque, + fmt::{self, Write as _}, + ops::Range, + path::Path, + sync::Arc, + time::Instant, +}; +use util::ResultExt as _; + +use crate::{EditPrediction, EditPredictionId, EditPredictionInputs}; + +const SWEEP_API_URL: &str = "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + +pub struct SweepAi { + pub api_token: Option, + pub debug_info: Arc, +} + +impl SweepAi { + pub fn new(cx: &App) -> Self { + SweepAi { + api_token: std::env::var("SWEEP_AI_TOKEN") + .context("No SWEEP_AI_TOKEN 
environment variable set") + .log_err(), + debug_info: debug_info(cx), + } + } + + pub fn request_prediction_with_sweep( + &self, + project: &Entity, + active_buffer: &Entity, + snapshot: BufferSnapshot, + position: language::Anchor, + events: Vec>, + recent_paths: &VecDeque, + diagnostic_search_range: Range, + cx: &mut App, + ) -> Task>> { + let debug_info = self.debug_info.clone(); + let Some(api_token) = self.api_token.clone() else { + return Task::ready(Ok(None)); + }; + let full_path: Arc = snapshot + .file() + .map(|file| file.full_path(cx)) + .unwrap_or_else(|| "untitled".into()) + .into(); + + let project_file = project::File::from_dyn(snapshot.file()); + let repo_name = project_file + .map(|file| file.worktree.read(cx).root_name_str()) + .unwrap_or("untitled") + .into(); + let offset = position.to_offset(&snapshot); + + let recent_buffers = recent_paths.iter().cloned(); + let http_client = cx.http_client(); + + let recent_buffer_snapshots = recent_buffers + .filter_map(|project_path| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } + }) + .take(3) + .collect::>(); + + let cursor_point = position.to_point(&snapshot); + let buffer_snapshotted_at = Instant::now(); + + let result = cx.background_spawn(async move { + let text = snapshot.text(); + + let mut recent_changes = String::new(); + for event in &events { + write_event(event.as_ref(), &mut recent_changes).unwrap(); + } + + let mut file_chunks = recent_buffer_snapshots + .into_iter() + .map(|snapshot| { + let end_point = Point::new(30, 0).min(snapshot.max_point()); + FileChunk { + content: snapshot.text_for_range(Point::zero()..end_point).collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + .to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? 
+ .to_seconds_and_nanos_for_persistence()? + .0, + ) + }), + } + }) + .collect::>(); + + let diagnostic_entries = snapshot.diagnostics_in_range(diagnostic_search_range, false); + let mut diagnostic_content = String::new(); + let mut diagnostic_count = 0; + + for entry in diagnostic_entries { + let start_point: Point = entry.range.start; + + let severity = match entry.diagnostic.severity { + DiagnosticSeverity::ERROR => "error", + DiagnosticSeverity::WARNING => "warning", + DiagnosticSeverity::INFORMATION => "info", + DiagnosticSeverity::HINT => "hint", + _ => continue, + }; + + diagnostic_count += 1; + + writeln!( + &mut diagnostic_content, + "{} at line {}: {}", + severity, + start_point.row + 1, + entry.diagnostic.message + )?; + } + + if !diagnostic_content.is_empty() { + file_chunks.push(FileChunk { + file_path: format!("Diagnostics for {}", full_path.display()), + start_line: 0, + end_line: diagnostic_count, + content: diagnostic_content, + timestamp: None, + }); + } + + let request_body = AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; + + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); + + let inputs = EditPredictionInputs { + events, + included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { + path: full_path.clone(), + max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), + excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { + start_line: cloud_llm_client::predict_edits_v3::Line(0), + text: 
request_body.file_contents.into(), + }], + }], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + column: cursor_point.column, + line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), + }, + cursor_path: full_path.clone(), + }; + + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", api_token)) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; + + let mut response = http_client.send(request).await?; + + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + + let response_received_at = Instant::now(); + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: AutocompleteResponse = serde_json::from_slice(&body)?; + + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = language::text_diff(&old_text, &response.completion) + .into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok(( + response.autocomplete_id, + edits, + snapshot, + response_received_at, + inputs, + )) + }); + + let buffer = active_buffer.clone(); + + cx.spawn(async move |cx| { + let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; + anyhow::Ok( + EditPrediction::new( + EditPredictionId(id.into()), + &buffer, + &old_snapshot, + edits.into(), + buffer_snapshotted_at, + response_received_at, + inputs, + cx, + ) + .await, + ) + }) + } +} #[derive(Debug, Clone, Serialize)] -pub struct AutocompleteRequest { +struct AutocompleteRequest { pub debug_info: Arc, pub repo_name: String, pub branch: Option, @@ -22,7 +281,7 @@ 
pub struct AutocompleteRequest { } #[derive(Debug, Clone, Serialize)] -pub struct FileChunk { +struct FileChunk { pub file_path: String, pub start_line: usize, pub end_line: usize, @@ -31,7 +290,7 @@ pub struct FileChunk { } #[derive(Debug, Clone, Serialize)] -pub struct RetrievalChunk { +struct RetrievalChunk { pub file_path: String, pub start_line: usize, pub end_line: usize, @@ -40,7 +299,7 @@ pub struct RetrievalChunk { } #[derive(Debug, Clone, Serialize)] -pub struct UserAction { +struct UserAction { pub action_type: ActionType, pub line_number: usize, pub offset: usize, @@ -51,7 +310,7 @@ pub struct UserAction { #[allow(dead_code)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum ActionType { +enum ActionType { CursorMovement, InsertChar, DeleteChar, @@ -60,7 +319,7 @@ pub enum ActionType { } #[derive(Debug, Clone, Deserialize)] -pub struct AutocompleteResponse { +struct AutocompleteResponse { pub autocomplete_id: String, pub start_index: usize, pub end_index: usize, @@ -80,7 +339,7 @@ pub struct AutocompleteResponse { #[allow(dead_code)] #[derive(Debug, Clone, Deserialize)] -pub struct AdditionalCompletion { +struct AdditionalCompletion { pub start_index: usize, pub end_index: usize, pub completion: String, @@ -90,7 +349,7 @@ pub struct AdditionalCompletion { pub finish_reason: Option, } -pub(crate) fn write_event( +fn write_event( event: &cloud_llm_client::predict_edits_v3::Event, f: &mut impl fmt::Write, ) -> fmt::Result { @@ -115,7 +374,7 @@ pub(crate) fn write_event( } } -pub(crate) fn debug_info(cx: &gpui::App) -> Arc { +fn debug_info(cx: &gpui::App) -> Arc { format!( "Zed v{version} ({sha}) - OS: {os} - Zed v{version}", version = release_channel::AppVersion::global(cx), diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 6464ce19ebaf1f95ad58e2954fb68e934600dac4..78169821f0e6cddd51bcd2075d766c6d2e0f2e71 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ 
-30,7 +30,6 @@ use language::{ }; use language::{BufferSnapshot, OffsetRangeExt}; use language_model::{LlmApiToken, RefreshLlmTokenListener}; -use lsp::DiagnosticSeverity; use open_ai::FunctionDefinition; use project::{DisableAiSettings, Project, ProjectPath, WorktreeId}; use release_channel::AppVersion; @@ -42,7 +41,6 @@ use std::collections::{VecDeque, hash_map}; use telemetry_events::EditPredictionRating; use workspace::Workspace; -use std::fmt::Write as _; use std::ops::Range; use std::path::Path; use std::rc::Rc; @@ -80,6 +78,7 @@ use crate::rate_prediction_modal::{ NextEdit, PreviousEdit, RatePredictionsModal, ThumbsDownActivePrediction, ThumbsUpActivePrediction, }; +use crate::sweep_ai::SweepAi; use crate::zeta1::request_prediction_with_zeta1; pub use provider::ZetaEditPredictionProvider; @@ -171,7 +170,7 @@ impl FeatureFlag for Zeta2FeatureFlag { const NAME: &'static str = "zeta2"; fn enabled_for_staff() -> bool { - false + true } } @@ -192,8 +191,7 @@ pub struct Zeta { #[cfg(feature = "eval-support")] eval_cache: Option>, edit_prediction_model: ZetaEditPredictionModel, - sweep_api_token: Option, - sweep_ai_debug_info: Arc, + sweep_ai: SweepAi, data_collection_choice: DataCollectionChoice, rejected_predictions: Vec, reject_predictions_tx: mpsc::UnboundedSender<()>, @@ -202,7 +200,7 @@ pub struct Zeta { rated_predictions: HashSet, } -#[derive(Default, PartialEq, Eq)] +#[derive(Copy, Clone, Default, PartialEq, Eq)] pub enum ZetaEditPredictionModel { #[default] Zeta1, @@ -499,11 +497,8 @@ impl Zeta { #[cfg(feature = "eval-support")] eval_cache: None, edit_prediction_model: ZetaEditPredictionModel::Zeta2, - sweep_api_token: std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(), + sweep_ai: SweepAi::new(cx), data_collection_choice, - sweep_ai_debug_info: sweep_ai::debug_info(cx), rejected_predictions: Vec::new(), reject_predictions_debounce_task: None, reject_predictions_tx: reject_tx, @@ -517,7 +512,7 @@ impl Zeta 
{ } pub fn has_sweep_api_token(&self) -> bool { - self.sweep_api_token.is_some() + self.sweep_ai.api_token.is_some() } #[cfg(feature = "eval-support")] @@ -643,7 +638,9 @@ impl Zeta { } } project::Event::DiagnosticsUpdated { .. } => { - self.refresh_prediction_from_diagnostics(project, cx); + if cx.has_flag::() { + self.refresh_prediction_from_diagnostics(project, cx); + } } _ => (), } @@ -1183,249 +1180,77 @@ impl Zeta { position: language::Anchor, cx: &mut Context, ) -> Task>> { - match self.edit_prediction_model { - ZetaEditPredictionModel::Zeta1 => { - request_prediction_with_zeta1(self, project, active_buffer, position, cx) - } - ZetaEditPredictionModel::Zeta2 => { - self.request_prediction_with_zeta2(project, active_buffer, position, cx) - } - ZetaEditPredictionModel::Sweep => { - self.request_prediction_with_sweep(project, active_buffer, position, true, cx) - } - } + self.request_prediction_internal( + project.clone(), + active_buffer.clone(), + position, + cx.has_flag::(), + cx, + ) } - fn request_prediction_with_sweep( + fn request_prediction_internal( &mut self, - project: &Entity, - active_buffer: &Entity, + project: Entity, + active_buffer: Entity, position: language::Anchor, allow_jump: bool, cx: &mut Context, ) -> Task>> { - let snapshot = active_buffer.read(cx).snapshot(); - let debug_info = self.sweep_ai_debug_info.clone(); - let Some(api_token) = self.sweep_api_token.clone() else { - return Task::ready(Ok(None)); - }; - let full_path: Arc = snapshot - .file() - .map(|file| file.full_path(cx)) - .unwrap_or_else(|| "untitled".into()) - .into(); - - let project_file = project::File::from_dyn(snapshot.file()); - let repo_name = project_file - .map(|file| file.worktree.read(cx).root_name_str()) - .unwrap_or("untitled") - .into(); - let offset = position.to_offset(&snapshot); + const DIAGNOSTIC_LINES_RANGE: u32 = 20; - let project_state = self.get_or_init_zeta_project(project, cx); - let events = project_state.events(cx); + 
self.get_or_init_zeta_project(&project, cx); + let zeta_project = self.projects.get(&project.entity_id()).unwrap(); + let events = zeta_project.events(cx); let has_events = !events.is_empty(); - let recent_buffers = project_state.recent_paths.iter().cloned(); - let http_client = cx.http_client(); - - let recent_buffer_snapshots = recent_buffers - .filter_map(|project_path| { - let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; - if active_buffer == &buffer { - None - } else { - Some(buffer.read(cx).snapshot()) - } - }) - .take(3) - .collect::>(); - - const DIAGNOSTIC_LINES_RANGE: u32 = 20; + let snapshot = active_buffer.read(cx).snapshot(); let cursor_point = position.to_point(&snapshot); let diagnostic_search_start = cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; let diagnostic_search_range = Point::new(diagnostic_search_start, 0)..Point::new(diagnostic_search_end, 0); - let buffer_snapshotted_at = Instant::now(); - - let result = cx.background_spawn({ - let snapshot = snapshot.clone(); - let diagnostic_search_range = diagnostic_search_range.clone(); - async move { - let text = snapshot.text(); - - let mut recent_changes = String::new(); - for event in &events { - sweep_ai::write_event(event.as_ref(), &mut recent_changes).unwrap(); - } - - let mut file_chunks = recent_buffer_snapshots - .into_iter() - .map(|snapshot| { - let end_point = Point::new(30, 0).min(snapshot.max_point()); - sweep_ai::FileChunk { - content: snapshot.text_for_range(Point::zero()..end_point).collect(), - file_path: snapshot - .file() - .map(|f| f.path().as_unix_str()) - .unwrap_or("untitled") - .to_string(), - start_line: 0, - end_line: end_point.row as usize, - timestamp: snapshot.file().and_then(|file| { - Some( - file.disk_state() - .mtime()? - .to_seconds_and_nanos_for_persistence()? 
- .0, - ) - }), - } - }) - .collect::>(); - - let diagnostic_entries = - snapshot.diagnostics_in_range(diagnostic_search_range, false); - let mut diagnostic_content = String::new(); - let mut diagnostic_count = 0; - - for entry in diagnostic_entries { - let start_point: Point = entry.range.start; - - let severity = match entry.diagnostic.severity { - DiagnosticSeverity::ERROR => "error", - DiagnosticSeverity::WARNING => "warning", - DiagnosticSeverity::INFORMATION => "info", - DiagnosticSeverity::HINT => "hint", - _ => continue, - }; - - diagnostic_count += 1; - - writeln!( - &mut diagnostic_content, - "{} at line {}: {}", - severity, - start_point.row + 1, - entry.diagnostic.message - )?; - } - - if !diagnostic_content.is_empty() { - file_chunks.push(sweep_ai::FileChunk { - file_path: format!("Diagnostics for {}", full_path.display()), - start_line: 0, - end_line: diagnostic_count, - content: diagnostic_content, - timestamp: None, - }); - } - - let request_body = sweep_ai::AutocompleteRequest { - debug_info, - repo_name, - file_path: full_path.clone(), - file_contents: text.clone(), - original_file_contents: text, - cursor_position: offset, - recent_changes: recent_changes.clone(), - changes_above_cursor: true, - multiple_suggestions: false, - branch: None, - file_chunks, - retrieval_chunks: vec![], - recent_user_actions: vec![], - // TODO - privacy_mode_enabled: false, - }; - let mut buf: Vec = Vec::new(); - let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); - serde_json::to_writer(writer, &request_body)?; - let body: AsyncBody = buf.into(); - - let inputs = EditPredictionInputs { - events, - included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { - path: full_path.clone(), - max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), - excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { - start_line: cloud_llm_client::predict_edits_v3::Line(0), - text: request_body.file_contents.into(), - }], - }], - 
cursor_point: cloud_llm_client::predict_edits_v3::Point { - column: cursor_point.column, - line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), - }, - cursor_path: full_path.clone(), - }; - - const SWEEP_API_URL: &str = - "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; - - let request = http_client::Request::builder() - .uri(SWEEP_API_URL) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", api_token)) - .header("Connection", "keep-alive") - .header("Content-Encoding", "br") - .method(Method::POST) - .body(body)?; - - let mut response = http_client.send(request).await?; - - let mut body: Vec = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - - let response_received_at = Instant::now(); - if !response.status().is_success() { - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - String::from_utf8_lossy(&body), - ); - }; - - let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; - - let old_text = snapshot - .text_for_range(response.start_index..response.end_index) - .collect::(); - let edits = language::text_diff(&old_text, &response.completion) - .into_iter() - .map(|(range, text)| { - ( - snapshot.anchor_after(response.start_index + range.start) - ..snapshot.anchor_before(response.start_index + range.end), - text, - ) - }) - .collect::>(); - - anyhow::Ok(( - response.autocomplete_id, - edits, - snapshot, - response_received_at, - inputs, - )) - } - }); - - let buffer = active_buffer.clone(); - let project = project.clone(); - let active_buffer = active_buffer.clone(); + let task = match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 => request_prediction_with_zeta1( + self, + &project, + &active_buffer, + snapshot.clone(), + position, + events, + cx, + ), + ZetaEditPredictionModel::Zeta2 => self.request_prediction_with_zeta2( + &project, + &active_buffer, + snapshot.clone(), + position, + events, + cx, + 
), + ZetaEditPredictionModel::Sweep => self.sweep_ai.request_prediction_with_sweep( + &project, + &active_buffer, + snapshot.clone(), + position, + events, + &zeta_project.recent_paths, + diagnostic_search_range.clone(), + cx, + ), + }; cx.spawn(async move |this, cx| { - let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; + let prediction = task + .await? + .filter(|prediction| !prediction.edits.is_empty()); - if edits.is_empty() { + if prediction.is_none() && allow_jump { + let cursor_point = position.to_point(&snapshot); if has_events - && allow_jump && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( - active_buffer, + active_buffer.clone(), &snapshot, diagnostic_search_range, cursor_point, @@ -1436,9 +1261,9 @@ impl Zeta { { return this .update(cx, |this, cx| { - this.request_prediction_with_sweep( - &project, - &jump_buffer, + this.request_prediction_internal( + project, + jump_buffer, jump_position, false, cx, @@ -1450,19 +1275,7 @@ impl Zeta { return anyhow::Ok(None); } - anyhow::Ok( - EditPrediction::new( - EditPredictionId(id.into()), - &buffer, - &old_snapshot, - edits.into(), - buffer_snapshotted_at, - response_received_at, - inputs, - cx, - ) - .await, - ) + Ok(prediction) }) } @@ -1549,7 +1362,9 @@ impl Zeta { &mut self, project: &Entity, active_buffer: &Entity, + active_snapshot: BufferSnapshot, position: language::Anchor, + events: Vec>, cx: &mut Context, ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); @@ -1561,7 +1376,6 @@ impl Zeta { .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) }); let options = self.options.clone(); - let active_snapshot = active_buffer.read(cx).snapshot(); let buffer_snapshotted_at = Instant::now(); let Some(excerpt_path) = active_snapshot .file() @@ -1579,10 +1393,6 @@ impl Zeta { .collect::>(); let debug_tx = self.debug_tx.clone(); - let events = project_state - .map(|state| state.events(cx)) - 
.unwrap_or_default(); - let diagnostics = active_snapshot.diagnostic_sets().clone(); let file = active_buffer.read(cx).file(); diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs index 5a779cabeceac0bcb58340f7bbb98175409916e8..7f80d60d5efcbbd0bd7b9426508c344c063d5597 100644 --- a/crates/zeta/src/zeta1.rs +++ b/crates/zeta/src/zeta1.rs @@ -32,19 +32,17 @@ pub(crate) fn request_prediction_with_zeta1( zeta: &mut Zeta, project: &Entity, buffer: &Entity, + snapshot: BufferSnapshot, position: language::Anchor, + events: Vec>, cx: &mut Context, ) -> Task>> { let buffer = buffer.clone(); let buffer_snapshotted_at = Instant::now(); - let snapshot = buffer.read(cx).snapshot(); let client = zeta.client.clone(); let llm_token = zeta.llm_token.clone(); let app_version = AppVersion::global(cx); - let zeta_project = zeta.get_or_init_zeta_project(project, cx); - let events = Arc::new(zeta_project.events(cx)); - let (git_info, can_collect_file) = if let Some(file) = snapshot.file() { let can_collect_file = zeta.can_collect_file(project, file, cx); let git_info = if can_collect_file { diff --git a/crates/zeta2_tools/src/zeta2_tools.rs b/crates/zeta2_tools/src/zeta2_tools.rs index 6a6268f68ad0fa10e2379ac21e07d4fa530dddc1..4e650f2405d63feab010c5c9b73efc75bd576af6 100644 --- a/crates/zeta2_tools/src/zeta2_tools.rs +++ b/crates/zeta2_tools/src/zeta2_tools.rs @@ -42,43 +42,48 @@ actions!( pub fn init(cx: &mut App) { cx.observe_new(move |workspace: &mut Workspace, _, _cx| { - workspace.register_action(move |workspace, _: &OpenZeta2Inspector, window, cx| { - let project = workspace.project(); - workspace.split_item( - SplitDirection::Right, - Box::new(cx.new(|cx| { - Zeta2Inspector::new( - &project, - workspace.client(), - workspace.user_store(), - window, - cx, - ) - })), - window, - cx, - ); - }); - }) - .detach(); - - cx.observe_new(move |workspace: &mut Workspace, _, _cx| { - workspace.register_action(move |workspace, _: &OpenZeta2ContextView, window, cx| { - let project = 
workspace.project(); - workspace.split_item( - SplitDirection::Right, - Box::new(cx.new(|cx| { - Zeta2ContextView::new( - project.clone(), - workspace.client(), - workspace.user_store(), - window, - cx, - ) - })), - window, - cx, - ); + workspace.register_action_renderer(|div, _, _, cx| { + let has_flag = cx.has_flag::(); + div.when(has_flag, |div| { + div.on_action( + cx.listener(move |workspace, _: &OpenZeta2Inspector, window, cx| { + let project = workspace.project(); + workspace.split_item( + SplitDirection::Right, + Box::new(cx.new(|cx| { + Zeta2Inspector::new( + &project, + workspace.client(), + workspace.user_store(), + window, + cx, + ) + })), + window, + cx, + ) + }), + ) + .on_action(cx.listener( + move |workspace, _: &OpenZeta2ContextView, window, cx| { + let project = workspace.project(); + workspace.split_item( + SplitDirection::Right, + Box::new(cx.new(|cx| { + Zeta2ContextView::new( + project.clone(), + workspace.client(), + workspace.user_store(), + window, + cx, + ) + })), + window, + cx, + ); + }, + )) + }) }); }) .detach(); From fb0fcd86fd4de39107ca034265e3fc01124a111c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 25 Nov 2025 10:43:46 -0800 Subject: [PATCH 032/749] Add missing update of last_prediction_refresh (#43483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes a regression introduced in https://github.com/zed-industries/zed/pull/43284 where edit predictions stopped being throttled at all 😬 Release Notes: - N/A Co-authored-by: Ben Kunkle --- crates/zeta/src/zeta.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 78169821f0e6cddd51bcd2075d766c6d2e0f2e71..8b54576a12f2ff788b6088299c30923b2ce8adda 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -1123,7 +1123,6 @@ impl Zeta { zeta_project.next_pending_prediction_id += 1; let last_request = zeta_project.last_prediction_refresh; - // TODO 
report cancelled requests like in zeta1 let task = cx.spawn(async move |this, cx| { if let Some((last_entity, last_timestamp)) = last_request && throttle_entity == last_entity @@ -1133,6 +1132,12 @@ impl Zeta { cx.background_executor().timer(timeout).await; } + this.update(cx, |this, cx| { + this.get_or_init_zeta_project(&project, cx) + .last_prediction_refresh = Some((throttle_entity, Instant::now())); + }) + .ok(); + let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); // When a prediction completes, remove it from the pending list, and cancel From 7ecbf8cf60feff43da961ec3e3d99e7570f75454 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Tue, 25 Nov 2025 10:44:04 -0800 Subject: [PATCH 033/749] zeta2: Remove expected context from evals (#43430) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/zeta_cli/src/evaluate.rs | 114 +------------------ crates/zeta_cli/src/example.rs | 189 +------------------------------- crates/zeta_cli/src/main.rs | 2 - crates/zeta_cli/src/predict.rs | 87 ++------------- 4 files changed, 18 insertions(+), 374 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index a0ebdf998595ccacec2dafecf51b6094e5e401b5..6726dcb3aafdeff7fe41cbbbc49850c1e7465cf4 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeSet, HashMap}, + collections::HashMap, io::{IsTerminal, Write}, sync::Arc, }; @@ -125,21 +125,10 @@ fn write_aggregated_scores( .peekable(); let has_edit_predictions = edit_predictions.peek().is_some(); let aggregated_result = EvaluationResult { - context: Scores::aggregate(successful.iter().map(|r| &r.context)), edit_prediction: has_edit_predictions.then(|| Scores::aggregate(edit_predictions)), prompt_len: successful.iter().map(|r| r.prompt_len).sum::() / successful.len(), generated_len: successful.iter().map(|r| r.generated_len).sum::() / successful.len(), - 
context_lines_found_in_context: successful - .iter() - .map(|r| r.context_lines_found_in_context) - .sum::() - / successful.len(), - context_lines_in_expected_patch: successful - .iter() - .map(|r| r.context_lines_in_expected_patch) - .sum::() - / successful.len(), }; writeln!(w, "\n{}", "-".repeat(80))?; @@ -261,11 +250,8 @@ fn write_eval_result( #[derive(Debug, Default)] pub struct EvaluationResult { pub edit_prediction: Option, - pub context: Scores, pub prompt_len: usize, pub generated_len: usize, - pub context_lines_in_expected_patch: usize, - pub context_lines_found_in_context: usize, } #[derive(Default, Debug)] @@ -363,14 +349,6 @@ impl std::fmt::Display for EvaluationResult { impl EvaluationResult { fn fmt_markdown(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - r#" -### Context Scores -{} -"#, - self.context.to_markdown(), - )?; if let Some(prediction) = &self.edit_prediction { write!( f, @@ -387,34 +365,18 @@ impl EvaluationResult { writeln!(f, "### Scores\n")?; writeln!( f, - " Prompt Generated RetrievedContext PatchContext TP FP FN Precision Recall F1" + " Prompt Generated TP FP FN Precision Recall F1" )?; writeln!( f, - "─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────" - )?; - writeln!( - f, - "Context Retrieval {:<7} {:<9} {:<16} {:<16} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", - "", - "", - "", - "", - self.context.true_positives, - self.context.false_positives, - self.context.false_negatives, - self.context.precision() * 100.0, - self.context.recall() * 100.0, - self.context.f1_score() * 100.0 + "───────────────────────────────────────────────────────────────────────────────────────────────" )?; if let Some(edit_prediction) = &self.edit_prediction { writeln!( f, - "Edit Prediction {:<7} {:<9} {:<16} {:<16} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "Edit Prediction {:<7} {:<9} {:<6} {:<6} {:<6} {:>9.2} {:>8.2} {:>7.2}", self.prompt_len, 
self.generated_len, - self.context_lines_found_in_context, - self.context_lines_in_expected_patch, edit_prediction.true_positives, edit_prediction.false_positives, edit_prediction.false_negatives, @@ -434,53 +396,6 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval ..Default::default() }; - let actual_context_lines: HashSet<_> = preds - .excerpts - .iter() - .flat_map(|excerpt| { - excerpt - .text - .lines() - .map(|line| format!("{}: {line}", excerpt.path.display())) - }) - .collect(); - - let mut false_positive_lines = actual_context_lines.clone(); - - for entry in &example.expected_context { - let mut best_alternative_score: Option = None; - - for alternative in &entry.alternatives { - let expected: HashSet<_> = alternative - .excerpts - .iter() - .flat_map(|excerpt| { - excerpt - .text - .lines() - .map(|line| format!("{}: {line}", excerpt.path.display())) - }) - .collect(); - - let scores = Scores::new(&expected, &actual_context_lines); - - false_positive_lines.retain(|line| !expected.contains(line)); - - if best_alternative_score - .as_ref() - .is_none_or(|best| scores.recall() > best.recall()) - { - best_alternative_score = Some(scores); - } - } - - let best_alternative = best_alternative_score.unwrap_or_default(); - eval_result.context.false_negatives += best_alternative.false_negatives; - eval_result.context.true_positives += best_alternative.true_positives; - } - - eval_result.context.false_positives = false_positive_lines.len(); - if predict { // todo: alternatives for patches let expected_patch = example @@ -493,25 +408,6 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) .map(|line| line.to_string()) .collect(); - let expected_context_lines = expected_patch - .iter() - .filter_map(|line| { - if let DiffLine::Context(str) = line { - Some(String::from(*str)) - } else { - None - } - }) - .collect::>(); - let 
actual_context_lines = preds - .excerpts - .iter() - .flat_map(|excerpt| excerpt.text.lines().map(ToOwned::to_owned)) - .collect::>(); - - let matched = expected_context_lines - .intersection(&actual_context_lines) - .count(); let actual_patch_lines = preds .diff @@ -522,8 +418,6 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval .collect(); eval_result.edit_prediction = Some(Scores::new(&expected_patch_lines, &actual_patch_lines)); - eval_result.context_lines_in_expected_patch = expected_context_lines.len(); - eval_result.context_lines_found_in_context = matched; } eval_result diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 7dbe304a88b9ea024adab793fa782fd2f4bdf1c0..a9d4c4f47c5a05d4198b1cffaee51e14a122e88d 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -14,7 +14,6 @@ use anyhow::{Context as _, Result, anyhow}; use clap::ValueEnum; use cloud_zeta2_prompt::CURSOR_MARKER; use collections::HashMap; -use edit_prediction_context::Line; use futures::{ AsyncWriteExt as _, lock::{Mutex, OwnedMutexGuard}, @@ -53,7 +52,6 @@ pub struct Example { pub cursor_position: String, pub edit_history: String, pub expected_patch: String, - pub expected_context: Vec, } pub type ActualExcerpt = Excerpt; @@ -64,25 +62,6 @@ pub struct Excerpt { pub text: String, } -#[derive(Default, Clone, Debug, Serialize, Deserialize)] -pub struct ExpectedContextEntry { - pub heading: String, - pub alternatives: Vec, -} - -#[derive(Default, Clone, Debug, Serialize, Deserialize)] -pub struct ExpectedExcerptSet { - pub heading: String, - pub excerpts: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ExpectedExcerpt { - pub path: PathBuf, - pub text: String, - pub required_lines: Vec, -} - #[derive(ValueEnum, Debug, Clone)] pub enum ExampleFormat { Json, @@ -132,7 +111,6 @@ impl NamedExample { cursor_position: String::new(), edit_history: String::new(), expected_patch: String::new(), 
- expected_context: Vec::new(), }, }; @@ -197,30 +175,10 @@ impl NamedExample { }; } Event::End(TagEnd::Heading(HeadingLevel::H3)) => { - let heading = mem::take(&mut text); - match current_section { - Section::ExpectedExcerpts => { - named.example.expected_context.push(ExpectedContextEntry { - heading, - alternatives: Vec::new(), - }); - } - _ => {} - } + mem::take(&mut text); } Event::End(TagEnd::Heading(HeadingLevel::H4)) => { - let heading = mem::take(&mut text); - match current_section { - Section::ExpectedExcerpts => { - let expected_context = &mut named.example.expected_context; - let last_entry = expected_context.last_mut().unwrap(); - last_entry.alternatives.push(ExpectedExcerptSet { - heading, - excerpts: Vec::new(), - }) - } - _ => {} - } + mem::take(&mut text); } Event::End(TagEnd::Heading(level)) => { anyhow::bail!("Unexpected heading level: {level}"); @@ -253,41 +211,7 @@ impl NamedExample { named.example.cursor_position = mem::take(&mut text); } Section::ExpectedExcerpts => { - let text = mem::take(&mut text); - for excerpt in text.split("\n…\n") { - let (mut text, required_lines) = extract_required_lines(&excerpt); - if !text.ends_with('\n') { - text.push('\n'); - } - - if named.example.expected_context.is_empty() { - named.example.expected_context.push(Default::default()); - } - - let alternatives = &mut named - .example - .expected_context - .last_mut() - .unwrap() - .alternatives; - - if alternatives.is_empty() { - alternatives.push(ExpectedExcerptSet { - heading: String::new(), - excerpts: vec![], - }); - } - - alternatives - .last_mut() - .unwrap() - .excerpts - .push(ExpectedExcerpt { - path: block_info.into(), - text, - required_lines, - }); - } + mem::take(&mut text); } Section::ExpectedPatch => { named.example.expected_patch = mem::take(&mut text); @@ -561,47 +485,6 @@ impl NamedExample { } } -fn extract_required_lines(text: &str) -> (String, Vec) { - const MARKER: &str = "[ZETA]"; - let mut new_text = String::new(); - let mut 
required_lines = Vec::new(); - let mut skipped_lines = 0_u32; - - for (row, mut line) in text.split('\n').enumerate() { - if let Some(marker_column) = line.find(MARKER) { - let mut strip_column = marker_column; - - while strip_column > 0 { - let prev_char = line[strip_column - 1..].chars().next().unwrap(); - if prev_char.is_whitespace() || ['/', '#'].contains(&prev_char) { - strip_column -= 1; - } else { - break; - } - } - - let metadata = &line[marker_column + MARKER.len()..]; - if metadata.contains("required") { - required_lines.push(Line(row as u32 - skipped_lines)); - } - - if strip_column == 0 { - skipped_lines += 1; - continue; - } - - line = &line[..strip_column]; - } - - new_text.push_str(line); - new_text.push('\n'); - } - - new_text.pop(); - - (new_text, required_lines) -} - async fn run_git(repo_path: &Path, args: &[&str]) -> Result { let output = smol::process::Command::new("git") .current_dir(repo_path) @@ -656,37 +539,6 @@ impl Display for NamedExample { )?; } - if !self.example.expected_context.is_empty() { - write!(f, "\n## {EXPECTED_CONTEXT_HEADING}\n\n")?; - - for entry in &self.example.expected_context { - write!(f, "\n### {}\n\n", entry.heading)?; - - let skip_h4 = - entry.alternatives.len() == 1 && entry.alternatives[0].heading.is_empty(); - - for excerpt_set in &entry.alternatives { - if !skip_h4 { - write!(f, "\n#### {}\n\n", excerpt_set.heading)?; - } - - for excerpt in &excerpt_set.excerpts { - write!( - f, - "`````{}{}\n{}`````\n\n", - excerpt - .path - .extension() - .map(|ext| format!("{} ", ext.to_string_lossy())) - .unwrap_or_default(), - excerpt.path.display(), - excerpt.text - )?; - } - } - } - } - Ok(()) } } @@ -707,38 +559,3 @@ pub async fn lock_repo(path: impl AsRef) -> OwnedMutexGuard<()> { .lock_owned() .await } - -#[cfg(test)] -mod tests { - use super::*; - use indoc::indoc; - use pretty_assertions::assert_eq; - - #[test] - fn test_extract_required_lines() { - let input = indoc! 
{" - zero - one // [ZETA] required - two - // [ZETA] something - three - four # [ZETA] required - five - "}; - - let expected_updated_input = indoc! {" - zero - one - two - three - four - five - "}; - - let expected_required_lines = vec![Line(1), Line(4)]; - - let (updated_input, required_lines) = extract_required_lines(input); - assert_eq!(updated_input, expected_updated_input); - assert_eq!(required_lines, expected_required_lines); - } -} diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index f87563cc34ca7631baf8195e42e4e3473f522659..d13f0710cdc4d16666594d25dc639d337fb6bdfc 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -128,8 +128,6 @@ pub struct PredictArguments { #[derive(Clone, Debug, Args)] pub struct PredictionOptions { - #[arg(long)] - use_expected_context: bool, #[clap(flatten)] zeta2: Zeta2Args, #[clap(long)] diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index a757a5faa0dbae95c4dcab58c76d50450b1d2e9f..8a1a4131fb684a5186b2111f9d922fa34d6972e1 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -1,4 +1,4 @@ -use crate::example::{ActualExcerpt, ExpectedExcerpt, NamedExample}; +use crate::example::{ActualExcerpt, NamedExample}; use crate::headless::ZetaCliAppState; use crate::paths::{CACHE_DIR, LATEST_EXAMPLE_RUN_DIR, RUN_DIR, print_run_data_dir}; use crate::{ @@ -7,16 +7,13 @@ use crate::{ use ::serde::Serialize; use anyhow::{Context, Result, anyhow}; use cloud_zeta2_prompt::{CURSOR_MARKER, write_codeblock}; -use collections::HashMap; use futures::StreamExt as _; use gpui::{AppContext, AsyncApp, Entity}; -use language::{Anchor, Buffer, Point}; use project::Project; use project::buffer_store::BufferStoreEvent; use serde::Deserialize; use std::fs; use std::io::{IsTerminal, Write}; -use std::ops::Range; use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; @@ -204,15 +201,12 @@ pub async fn perform_predict( let mut result = 
result.lock().unwrap(); result.generated_len = response.chars().count(); - if !options.use_expected_context { - result.planning_search_time = Some( - search_queries_generated_at.unwrap() - start_time.unwrap(), - ); - result.running_search_time = Some( - search_queries_executed_at.unwrap() - - search_queries_generated_at.unwrap(), - ); - } + result.planning_search_time = + Some(search_queries_generated_at.unwrap() - start_time.unwrap()); + result.running_search_time = Some( + search_queries_executed_at.unwrap() + - search_queries_generated_at.unwrap(), + ); result.prediction_time = prediction_finished_at - prediction_started_at; result.total_time = prediction_finished_at - start_time.unwrap(); @@ -224,37 +218,10 @@ pub async fn perform_predict( } }); - if options.use_expected_context { - let context_excerpts_tasks = example - .example - .expected_context - .iter() - .flat_map(|section| { - section.alternatives[0].excerpts.iter().map(|excerpt| { - resolve_context_entry(project.clone(), excerpt.clone(), cx.clone()) - }) - }) - .collect::>(); - let context_excerpts_vec = - futures::future::try_join_all(context_excerpts_tasks).await?; - - let mut context_excerpts = HashMap::default(); - for (buffer, mut excerpts) in context_excerpts_vec { - context_excerpts - .entry(buffer) - .or_insert(Vec::new()) - .append(&mut excerpts); - } - - zeta.update(cx, |zeta, _cx| { - zeta.set_context(project.clone(), context_excerpts) - })?; - } else { - zeta.update(cx, |zeta, cx| { - zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) - })? - .await?; - } + zeta.update(cx, |zeta, cx| { + zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) + })? 
+ .await?; } let prediction = zeta @@ -274,38 +241,6 @@ pub async fn perform_predict( anyhow::Ok(result) } -async fn resolve_context_entry( - project: Entity, - excerpt: ExpectedExcerpt, - mut cx: AsyncApp, -) -> Result<(Entity, Vec>)> { - let buffer = project - .update(&mut cx, |project, cx| { - let project_path = project.find_project_path(&excerpt.path, cx).unwrap(); - project.open_buffer(project_path, cx) - })? - .await?; - - let ranges = buffer.read_with(&mut cx, |buffer, _| { - let full_text = buffer.text(); - let offset = full_text - .find(&excerpt.text) - .expect("Expected context not found"); - let point = buffer.offset_to_point(offset); - excerpt - .required_lines - .iter() - .map(|line| { - let row = point.row + line.0; - let range = Point::new(row, 0)..Point::new(row + 1, 0); - buffer.anchor_after(range.start)..buffer.anchor_before(range.end) - }) - .collect() - })?; - - Ok((buffer, ranges)) -} - struct RunCache { cache_mode: CacheMode, example_run_dir: PathBuf, From 83f0a3fd1364bcb39fe4a69dd5a417aa6c54f89a Mon Sep 17 00:00:00 2001 From: Peter Tripp Date: Tue, 25 Nov 2025 14:00:31 -0500 Subject: [PATCH 034/749] Redact sensitive environment variables in LSP Logs: Server Info (#43480) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow-up to: - https://github.com/zed-industries/zed/pull/43436 - https://github.com/zed-industries/zed/pull/42831 The changes in #42831 resulted in a regression where environment variables in the Server Info view were no longer redact. The changes in #43436 were insufficient as I was still seeing sensitive values in Nightly e6fe95b4f2f676c7fc4a5f951ba7c721e7d22e8a (which includes #43436). CC: @SomeoneToIgnore (Hi! 
👋 Thanks for keeping this redaction functionality alive) Release Notes: - N/A --- crates/language_tools/src/lsp_log_view.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index e7586583704750b0c84832ecb8cb9ba8d5a9b5a1..4cf47cab079617d55aeeb959dcad116919a55609 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -340,11 +340,11 @@ impl LspLogView { * Configuration: {CONFIGURATION}", NAME = info.status.name, ID = info.id, - BINARY = info.status.binary.as_ref().map_or_else( - || "Unknown".to_string(), - |binary| serde_json::to_string_pretty(binary) - .unwrap_or_else(|e| format!("Failed to serialize binary info: {e:#}")) - ), + BINARY = info + .status + .binary + .as_ref() + .map_or_else(|| "Unknown".to_string(), |binary| format!("{:#?}", binary)), WORKSPACE_FOLDERS = info .status .workspace_folders From 1f9d5ef6849e482fc35dda552adc4c417cca1f0a Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Tue, 25 Nov 2025 16:49:16 -0300 Subject: [PATCH 035/749] Always display terminal cursor when blinking is disabled (#43487) Fixes an issue where the terminal cursor wouldn't always be displayed in the default `blink: "terminal_controlled"` mode unless the terminal requested cursor blinking. 
Release Notes: - N/A --- crates/terminal_view/src/terminal_view.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 7c8bda83be864353c35b0317efc8599456dca6e5..7b3e29ac9b0582d081a286539d973fe8f1a453c5 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -649,9 +649,10 @@ impl TerminalView { // When focused, check blinking settings and blink manager state match TerminalSettings::get_global(cx).blinking { TerminalBlink::Off => true, - TerminalBlink::On | TerminalBlink::TerminalControlled => { - self.blink_manager.read(cx).visible() + TerminalBlink::TerminalControlled => { + !self.blinking_terminal_enabled || self.blink_manager.read(cx).visible() } + TerminalBlink::On => self.blink_manager.read(cx).visible(), } } From d49044328636181131c1d3d3822597fbcf111503 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Tue, 25 Nov 2025 16:06:46 -0500 Subject: [PATCH 036/749] Refresh collaboration docs (#43489) Most of the features for collab were previously listed in the section that was written for private calls. Most of this PR is moving that content over to the channel documentation and adapting it slightly. Private calls have similar collaboration, so we can just point back to the channels doc in that section and keep it pretty thin / DRY. 
Release Notes: - N/A --- docs/src/SUMMARY.md | 2 +- docs/src/collaboration/channels.md | 113 ++++++++++++++---- .../contacts-and-private-calls.md | 18 +++ docs/src/collaboration/overview.md | 12 +- docs/src/collaboration/private-calls.md | 99 --------------- 5 files changed, 120 insertions(+), 124 deletions(-) create mode 100644 docs/src/collaboration/contacts-and-private-calls.md delete mode 100644 docs/src/collaboration/private-calls.md diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 2f8bcd2ce8be00790866025d5de687d32aee7dcf..5974b4f68d8951fa62b6bfaa625db5fdb38899fd 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -36,7 +36,7 @@ - [Code Completions](./completions.md) - [Collaboration](./collaboration/overview.md) - [Channels](./collaboration/channels.md) - - [Private Calls](./collaboration/private-calls.md) + - [Contacts and Private Calls](./collaboration/contacts-and-private-calls.md) - [Git](./git.md) - [Debugger](./debugger.md) - [Diagnostics](./diagnostics.md) diff --git a/docs/src/collaboration/channels.md b/docs/src/collaboration/channels.md index bc723d73dedf16d2f75179f9203cdbf473bebbbb..b20cff2a12ced63cf75c13bfd8b63d25f2c53c50 100644 --- a/docs/src/collaboration/channels.md +++ b/docs/src/collaboration/channels.md @@ -1,50 +1,123 @@ # Channels -## Overview - Channels provide a way to streamline collaborating for software engineers in many ways, but particularly: - Pairing – when working on something together, you both have your own screen, mouse, and keyboard. -- Mentoring – it’s easy to jump in to someone else’s context, and help them get unstuck, without the friction of pushing code up. +- Mentoring – it's easy to jump in to someone else's context, and help them get unstuck, without the friction of pushing code up. - Refactoring – you can have multiple people join in on large refactoring without fear of conflict. - Ambient awareness – you can see what everyone else is working on with no need for status emails or meetings. 
-## Channels - -To open the collaboration panel hit {#kb collab_panel::ToggleFocus} or `collab panel: toggle focus`. +Each channel corresponds to an ongoing project or work-stream. +You can see who's in a channel as their avatars will show up in the sidebar. +This makes it easy to see what everyone is doing and where to find them if needed. -Each channel corresponds to an ongoing project or work-stream. You can see who’s in a channel as their avatars will show up in the sidebar. This makes it easy to see what everyone is doing and where to find them if needed. +Create a channel by clicking the `+` icon next to the `Channels` text in the collab panel. +Create a subchannel by right clicking an existing channel and selecting `New Subchannel`. -You can create as many channels as you need. As in the example above, you can mix channels for your day job, as well as side-projects in one instance of Zed. +You can mix channels for your day job, as well as side-projects in your collab panel. Joining a channel adds you to a shared room where you can work on projects together. -## Sharing projects +_[Join our channel tree to get an idea of how you can organize yours.](https://zed.dev/community-links)_ + +## Inviting People + +By default, channels you create can only be accessed by you. +You can invite collaborators by right clicking and selecting `Manage members`. + +When you have subchannels nested under others, permissions are inherited. +For instance, adding people to the top-level channel in your channel tree will automatically give them access to its subchannels. + +Once you have added someone, they can either join your channel by clicking on it in their Zed sidebar, or you can share the link to the channel so that they can join directly. + +## Voice Chat -After joining a channel, you can `Share` a project with the other people there. This will enable them to edit the code hosted on your machine as though they had it checked out locally. 
+You can mute/unmute your microphone via the microphone icon in the upper right-hand side of the window. -When you are editing someone else’s project, you still have the full power of the editor at your fingertips, you can jump to definitions, use the AI assistant, and see any diagnostic errors. This is extremely powerful for pairing, as one of you can be implementing the current method while the other is reading and researching the correct solution to the next problem. And, because you have your own config running, it feels like you’re using your own machine. +> Note: When joining a channel, Zed will automatically share your microphone with other users in the call, if your OS allows it. +> If you'd prefer your microphone to be off when joining a channel, you can do so via the [`mute_on_join`](../configuring-zed.md#calls) setting. -See [our collaboration documentation](./private-calls.md) for more details about how this works. +## Sharing Projects -## Notes +After joining a channel, you can share a project over the channel via the `Share` button in the upper right-hand side of the window. +This will allow channel members to edit the code hosted on your machine as though they had it checked out locally. -Each channel has a notes file associated with it to keep track of current status, new ideas, or to collaborate on building out the design for the feature that you’re working on before diving into code. +When you are editing someone else's project, you still have the full power of the editor at your fingertips; you can jump to definitions, use the AI assistant, and see any diagnostic errors. +This is extremely powerful for pairing, as one of you can be implementing the current method while the other is reading and researching the correct solution to the next problem. +And, because you have your own config running, it feels like you're using your own machine. + +We aim to eliminate the distinction between local and remote projects as much as possible. 
+Collaborators can open, edit, and save files, perform searches, interact with the language server, etc. +Guests have a read-only view of the project, including access to language server info. + +### Unsharing a Project + +You can remove a project from a channel by clicking on the `Unshare` button in the title bar. + +Collaborators that are currently in that project will be disconnected from the project and will not be able to rejoin it unless you share it again. + +## Channel Notes + +Each channel has a Markdown notes file associated with it to keep track of current status, new ideas, or to collaborate on building out the design for the feature that you're working on before diving into code. This is similar to a Google Doc, except powered by Zed's collaborative software and persisted to our servers. -## Inviting people +Open the channel notes by clicking on the document icon to the right of the channel name in the collaboration panel. -By default, channels you create can only be accessed by you. You can invite collaborators by right clicking and selecting `Manage members`. +> Note: You can view a channel's notes without joining the channel, if you'd just like to read up on what has been written. -When you have channels nested under each other, permissions are inherited. For instance, in the example above, we only need to add people to the `#zed` channel, and they will automatically gain access to `#core-editor`, `#new-languages`, and `#stability`. +## Following Collaborators -Once you have added someone, they can either join your channel by clicking on it in their Zed sidebar, or you can share the link to the channel so that they can join directly. +To follow a collaborator, click on their avatar in the top left of the title bar. +You can also cycle through collaborators using {#kb workspace::FollowNextCollaborator} or `workspace: follow next collaborator` in the command palette. 
+ +When you join a project, you'll immediately start following the collaborator that invited you. + +When you are in a pane that is following a collaborator, you will: + +- follow their cursor and scroll position +- follow them to other files in the same project +- instantly swap to viewing their screenshare in that pane, if they are sharing their screen and leave the project + +To stop following, simply move your mouse or make an edit via your keyboard. + +### How Following Works + +Following is confined to a particular pane. +When a pane is following a collaborator, it is outlined in their cursor color. + +Avatars of collaborators in the same project as you are in color, and have a cursor color. +Collaborators in other projects are shown in gray. + +This pane-specific behavior allows you to follow someone in one pane while navigating independently in another and can be an effective layout for some collaboration styles. + +### Following a Terminal + +You can follow what a collaborator is doing in their terminal by having them share their screen and following it. + +In the future, we plan to allow you to collaborate in the terminal directly in a shared project. + +## Screen Sharing + +Share your screen with collaborators in the current channel by clicking on the `Share screen` (monitor icon) button in the top right of the title bar. +If you have multiple displays, you can choose which one to share via the chevron to the right of the monitor icon. + +After you've shared your screen, others can click on the `Screen` entry under your name in the collaboration panel to open a tab that always keeps it visible. +If they are following you, Zed will automatically switch between following your cursor in their Zed instance and your screen share, depending on whether you are focused on Zed or another application, like a web browser. + +> Note: Collaborators can see your entire screen when you are screen sharing, so be careful not to share anything you don't want to share. 
+> Remember to stop screen sharing when you are finished. ## Livestreaming & Guests -A Channel can also be made Public. This allows anyone to join the channel by clicking on the link. +A Channel can also be made Public. +This allows anyone to join the channel by clicking on the link. Guest users in channels can hear and see everything that is happening, and have read only access to projects and channel notes. -If you'd like to invite a guest to participate in a channel for the duration of a call you can do so by right clicking on them in the Collaboration Panel. "Allowing Write Access" will allow them to edit any projects shared into the call, and to use their microphone and share their screen if they wish. +If you'd like to invite a guest to participate in a channel for the duration of a call you can do so by right clicking on them in the Collaboration Panel. +"Allowing Write Access" will allow them to edit any projects shared into the call, and to use their microphone and share their screen if they wish. + +## Leaving a Call + +You can leave a channel by clicking on the `Leave call` button in the upper right-hand side of the window. diff --git a/docs/src/collaboration/contacts-and-private-calls.md b/docs/src/collaboration/contacts-and-private-calls.md new file mode 100644 index 0000000000000000000000000000000000000000..e5660a34b643b09272c1a1bb02e026c1a4bf8e03 --- /dev/null +++ b/docs/src/collaboration/contacts-and-private-calls.md @@ -0,0 +1,18 @@ +# Contacts and Private Calls + +Zed allows you to have private calls / collaboration sessions with those in your contacts. +These calls can be one-on-ones or contain any number of users from your contacts. + +## Adding a Contact + +1. In the collaboration panel, click the `+` button next to the `Contacts` section +1. Search for the contact using their GitHub handle + _Note: The contact must be an existing Zed user who has completed the GitHub authentication flow._ +1. Your contact will receive a notification. 
+ Once they accept, you'll both appear in each other's contact list. + +## Private Calls + +Simply click on a contact to start a private call. + +_Aside from a few additional features (channel notes, etc.), collaboration in private calls is largely the same as it is in [channels](./channels.md)._ diff --git a/docs/src/collaboration/overview.md b/docs/src/collaboration/overview.md index 8acbecc372cecee7fb87d40685b3a08eb6e046f6..fae16290fe0d9eedbff8a27a9e510964bb5aba84 100644 --- a/docs/src/collaboration/overview.md +++ b/docs/src/collaboration/overview.md @@ -2,12 +2,16 @@ At Zed, we believe that great things are built by great people working together. We have designed Zed to help individuals work faster and help teams of people work together more effectively. -Zed has two mechanisms for collaborating: -1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. -1. [Private Calls](./private-calls.md): Ad-hoc private collaboration with those in your contacts list. +In Zed, all collaboration happens in the collaboration panel, which can be opened via {#kb collab_panel::ToggleFocus} or `collab panel: toggle focus` from the command palette. +You will need to [sign in](../authentication.md#signing-in) in order to access features within the collaboration panel. + +## Collaboration panel -You will need to [sign in](../authentication.md#signing-in) in order to begin using Zed's collaboration features. +The collaboration panel is broken down into two sections: + +1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. +1. [Contacts](./contacts-and-private-calls.md): Ad-hoc private collaboration with those in your contacts list. 
--- diff --git a/docs/src/collaboration/private-calls.md b/docs/src/collaboration/private-calls.md deleted file mode 100644 index 8ea4790688f055074c5afcf4eb1d6d63ee49d868..0000000000000000000000000000000000000000 --- a/docs/src/collaboration/private-calls.md +++ /dev/null @@ -1,99 +0,0 @@ -# Private Calls - -## Adding a collaborator to a call - -Before you can collaborate, you'll need to add a collaborator to your contacts. To do this: - -1. Open the contacts menu by clicking on the `Show contacts menu` button in the upper right-hand corner of the window or by running `collab: toggle contacts menu` (`cmd-shift-c`). -2. Click the add button to the right of the search box. -3. Search for the contact you want to add using their GitHub handle. Note: the person you are trying to add as a contact must be an existing Zed user. - -### Inviting a collaborator - -You can add an existing Zed user as a contact from the contacts menu, deployed from the `Show contacts menu` button in the upper right-hand corner of the window or by `collab: toggle contacts menu` (`cmd-shift-c`) and then clicking the `Search for new contact` button to the right of the search box. - -![Inviting a collaborator to the current project](https://zed.dev/img/collaboration/add-a-collaborator.png) - -When you invite a collaborator to a project not in a call they will receive a notification to join, and a new call is created. - -![Receiving an invite to join a call](https://zed.dev/img/collaboration/receiving-an-invite.jpg) - -### Inviting non-Zed users - -If someone you want to collaborate with has not yet signed up for Zed, they will need to [download the app](https://zed.dev/download) and sign in for the first time before you can add them. Identity is tied to GitHub accounts, so new users will need to authenticate with GitHub in order to sign into Zed. - -### Voice chat - -When joining a call, Zed will automatically share your microphone with other users in the call, if your OS allows it. 
This isn't tied to your project. You can disable this for your client via the [`mute_on_join`](../configuring-zed.md#calls) setting. - -## Collaborating on a project - -### Share a project - -When you invite a collaborator to join your project, a new call begins. Your Zed windows will show the call participants in the title bar of the window. - -![A new Zed call with two collaborators](https://zed.dev/img/collaboration/new-call.png) - -Collaborators in the same project as you are in color, and have a cursor color. Collaborators in other projects are shown in gray. Collaborators that have access to the current project will have their own cursor color under their avatar. - -We aim to eliminate the distinction between local and remote projects as much as possible. Collaborators can open, edit, and save files, perform searches, interact with the language server, etc. Guests have a read-only view of the project, including access to language server info. - -#### Unshared Projects - -If a collaborator is currently in a project that is not shared, you will not be able to jump to their project or follow them until they either share the project or return to a project that is shared. - -If you are in a project that isn't shared, others will not be able to join it or see its contents. - -### Follow a collaborator - -To follow a collaborator, click on their avatar in the top right of the window. You can also cycle through collaborators using `workspace: follow next collaborator` (`ctrl-alt-cmd-f`). - -When you join a project, you'll immediately start following the collaborator that invited you. 
- -![Automatically following the person inviting us to a project](https://zed.dev/img/collaboration/joining-a-call.png) - -When you are in a pane that is following a collaborator, you will: - -- follow their cursor and scroll position -- follow them to other files in the same project -- instantly swap to viewing their screen in that pane, if they are sharing their screen and leave the project - -If you move your cursor or make an edit in that pane, you will stop following. - -To start following again, you can click on a collaborator's avatar or cycle through following different participants by pressing `workspace: follow next collaborator` (`ctrl-alt-cmd-f`). - -#### How following works - -Following is confined to a particular pane. When a pane is following a collaborator, it is outlined in their cursor color. - -This pane-specific behavior allows you to follow someone in one pane while navigating independently in another and can be an effective layout for some collaboration styles. - -### Sharing your screen - -Share your screen with collaborators in the current call by clicking on the `Share screen` button in the top right of the window. - -Collaborators will see your screen if they are following you and you start viewing a window outside Zed or a project that is not shared. - -Collaborators can see your entire screen when you are screen sharing, so be careful not to share anything you don't want to share. Remember to stop screen sharing when you are finished. - -Call participants can open a dedicated tab for your screen share by opening the contacts menu in the top right and clicking on the `Screen` entry if you are sharing your screen. - -### Adding a project - -You can add a project to a call by clicking on the `Share` button next to the project name in the title bar. - -### Removing a project - -You can remove a project from a call by clicking on the `Unshare` button next to the project name in the title bar. 
- -Collaborators that are currently in that project will be disconnected from the project and will not be able to rejoin it unless you share it again. - -### Following a collaborator's terminal - -You can follow what a collaborator is doing in their terminal by having them share their screen and following it. - -In the future, we plan to allow you to collaborate in the terminal directly in a shared project. - -### Leave call - -You can leave a call by opening the contacts menu in the top right and clicking on the `Leave call` button. From 877763b960f5eeb387cfb5513084f99b578c5c87 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Tue, 25 Nov 2025 17:08:39 -0500 Subject: [PATCH 037/749] More tweaks to collaboration docs (#43494) Release Notes: - N/A --- docs/src/collaboration/channels.md | 7 +++---- .../src/collaboration/contacts-and-private-calls.md | 13 ++++++++++--- docs/src/collaboration/overview.md | 5 ++++- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/docs/src/collaboration/channels.md b/docs/src/collaboration/channels.md index b20cff2a12ced63cf75c13bfd8b63d25f2c53c50..ebc2760275c7e3382dfabeac296dfede1b58d268 100644 --- a/docs/src/collaboration/channels.md +++ b/docs/src/collaboration/channels.md @@ -18,7 +18,7 @@ You can mix channels for your day job, as well as side-projects in your collab p Joining a channel adds you to a shared room where you can work on projects together. -_[Join our channel tree to get an idea of how you can organize yours.](https://zed.dev/community-links)_ +_Join [our channel tree](https://zed.dev/channel/zed-283) to get an idea of how you can organize yours._ ## Inviting People @@ -93,9 +93,8 @@ This pane-specific behavior allows you to follow someone in one pane while navig ### Following a Terminal -You can follow what a collaborator is doing in their terminal by having them share their screen and following it. - -In the future, we plan to allow you to collaborate in the terminal directly in a shared project. 
+Following is not currently supported in the terminal in the way it is supported in the editor. +As a workaround, collaborators can share their screen and you can follow that instead. ## Screen Sharing diff --git a/docs/src/collaboration/contacts-and-private-calls.md b/docs/src/collaboration/contacts-and-private-calls.md index e5660a34b643b09272c1a1bb02e026c1a4bf8e03..f011fa2c672c2e6e563e65172705115802262a7e 100644 --- a/docs/src/collaboration/contacts-and-private-calls.md +++ b/docs/src/collaboration/contacts-and-private-calls.md @@ -6,13 +6,20 @@ These calls can be one-on-ones or contain any number of users from your contacts ## Adding a Contact 1. In the collaboration panel, click the `+` button next to the `Contacts` section -1. Search for the contact using their GitHub handle - _Note: The contact must be an existing Zed user who has completed the GitHub authentication flow._ +1. Search for the contact using their GitHub handle.\ + _Note: Your contact must be an existing Zed user who has completed the GitHub authentication sign-in flow._ 1. Your contact will receive a notification. Once they accept, you'll both appear in each other's contact list. ## Private Calls -Simply click on a contact to start a private call. +To start up a private call... + +1. Click the `...` menu next to an online contact's name in the collaboration panel. +1. Click `Call ` + +Once you've begun a private call, you can add other online contacts by clicking on their name in the collaboration panel. 
+ +--- _Aside from a few additional features (channel notes, etc.), collaboration in private calls is largely the same as it is in [channels](./channels.md)._ diff --git a/docs/src/collaboration/overview.md b/docs/src/collaboration/overview.md index fae16290fe0d9eedbff8a27a9e510964bb5aba84..719aa56ee3b62c8562cd03ff8dd29faf25f2df5b 100644 --- a/docs/src/collaboration/overview.md +++ b/docs/src/collaboration/overview.md @@ -4,6 +4,7 @@ At Zed, we believe that great things are built by great people working together. We have designed Zed to help individuals work faster and help teams of people work together more effectively. In Zed, all collaboration happens in the collaboration panel, which can be opened via {#kb collab_panel::ToggleFocus} or `collab panel: toggle focus` from the command palette. + You will need to [sign in](../authentication.md#signing-in) in order to access features within the collaboration panel. ## Collaboration panel @@ -11,7 +12,7 @@ You will need to [sign in](../authentication.md#signing-in) in order to access f The collaboration panel is broken down into two sections: 1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. -1. [Contacts](./contacts-and-private-calls.md): Ad-hoc private collaboration with those in your contacts list. +1. [Contacts and Private Calls](./contacts-and-private-calls.md): Your contacts list for ad-hoc private collaboration. --- @@ -19,3 +20,5 @@ The collaboration panel is broken down into two sections: > Since sharing a project gives them access to your local file system, you should not share projects with people you do not trust; they could potentially do some nasty things. > > In the future, we will do more to prevent this type of access beyond the shared project and add more control over what collaborators can do, but for now, only collaborate with people you trust. 
+ +See our [Data and Privacy FAQs](https://zed.dev/faq#data-and-privacy) for collaboration. From 53eb35f5b2f6eb21ce9c5f4cb8fd511481bed83c Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Nov 2025 14:17:27 -0800 Subject: [PATCH 038/749] Add GPT 5.1 to Zed BYOK (#43492) Release Notes: - Added support for OpenAI's GPT 5.1 model to BYOK --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- crates/language_models/src/provider/open_ai.rs | 12 ++++++------ crates/open_ai/src/open_ai.rs | 9 ++++++++- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2698d882403b159f8ed350c59cc8e98ab467360d..533839bdd306e1d9c1e75e75dd2b26b80257d534 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17313,8 +17313,8 @@ dependencies = [ [[package]] name = "tiktoken-rs" -version = "0.8.0" -source = "git+https://github.com/zed-industries/tiktoken-rs?rev=30c32a4522751699adeda0d5840c71c3b75ae73d#30c32a4522751699adeda0d5840c71c3b75ae73d" +version = "0.9.1" +source = "git+https://github.com/zed-industries/tiktoken-rs?rev=7249f999c5fdf9bf3cc5c288c964454e4dac0c00#7249f999c5fdf9bf3cc5c288c964454e4dac0c00" dependencies = [ "anyhow", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index ab18418939e1b7100684e3c0acec277e7ec75a88..4377b120450c8da185820ebb8c44a334ba8a3778 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -655,7 +655,7 @@ sysinfo = "0.37.0" take-until = "0.2.0" tempfile = "3.20.0" thiserror = "2.0.12" -tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "30c32a4522751699adeda0d5840c71c3b75ae73d" } +tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "7249f999c5fdf9bf3cc5c288c964454e4dac0c00" } time = { version = "0.3", features = [ "macros", "parsing", diff --git a/crates/language_models/src/provider/open_ai.rs b/crates/language_models/src/provider/open_ai.rs index 9d828d188586b92e3f47a1345e070f33af380d48..46cea34e3e01cb0f8ad0f859827881f3ec74cad7 100644 --- a/crates/language_models/src/provider/open_ai.rs 
+++ b/crates/language_models/src/provider/open_ai.rs @@ -277,6 +277,7 @@ impl LanguageModel for OpenAiLanguageModel { | Model::Five | Model::FiveMini | Model::FiveNano + | Model::FivePointOne | Model::O1 | Model::O3 | Model::O4Mini => true, @@ -644,7 +645,6 @@ pub fn count_open_ai_tokens( ) -> BoxFuture<'static, Result> { cx.background_spawn(async move { let messages = collect_tiktoken_messages(request); - match model { Model::Custom { max_tokens, .. } => { let model = if max_tokens >= 100_000 { @@ -672,11 +672,11 @@ pub fn count_open_ai_tokens( | Model::O1 | Model::O3 | Model::O3Mini - | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages), - // GPT-5 models don't have tiktoken support yet; fall back on gpt-4o tokenizer - Model::Five | Model::FiveMini | Model::FiveNano => { - tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages) - } + | Model::O4Mini + | Model::Five + | Model::FiveMini + | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages), // GPT-5.1 doesn't have tiktoken support yet; fall back on gpt-4o tokenizer + Model::FivePointOne => tiktoken_rs::num_tokens_from_messages("gpt-5", &messages), } .map(|tokens| tokens as u64) }) diff --git a/crates/open_ai/src/open_ai.rs b/crates/open_ai/src/open_ai.rs index aaeee01c9c74f8592ccfffa01893f9333f120e89..6fdb393c9a13c7ff6a6981f949b4d0c865b9bff8 100644 --- a/crates/open_ai/src/open_ai.rs +++ b/crates/open_ai/src/open_ai.rs @@ -85,7 +85,8 @@ pub enum Model { FiveMini, #[serde(rename = "gpt-5-nano")] FiveNano, - + #[serde(rename = "gpt-5.1")] + FivePointOne, #[serde(rename = "custom")] Custom { name: String, @@ -121,6 +122,7 @@ impl Model { "gpt-5" => Ok(Self::Five), "gpt-5-mini" => Ok(Self::FiveMini), "gpt-5-nano" => Ok(Self::FiveNano), + "gpt-5.1" => Ok(Self::FivePointOne), invalid_id => anyhow::bail!("invalid model id '{invalid_id}'"), } } @@ -142,6 +144,7 @@ impl Model { Self::Five => "gpt-5", Self::FiveMini => "gpt-5-mini", Self::FiveNano => "gpt-5-nano", + 
Self::FivePointOne => "gpt-5.1", Self::Custom { name, .. } => name, } } @@ -163,6 +166,7 @@ impl Model { Self::Five => "gpt-5", Self::FiveMini => "gpt-5-mini", Self::FiveNano => "gpt-5-nano", + Self::FivePointOne => "gpt-5.1", Self::Custom { name, display_name, .. } => display_name.as_ref().unwrap_or(name), @@ -186,6 +190,7 @@ impl Model { Self::Five => 272_000, Self::FiveMini => 272_000, Self::FiveNano => 272_000, + Self::FivePointOne => 400_000, Self::Custom { max_tokens, .. } => *max_tokens, } } @@ -210,6 +215,7 @@ impl Model { Self::Five => Some(128_000), Self::FiveMini => Some(128_000), Self::FiveNano => Some(128_000), + Self::FivePointOne => Some(128_000), } } @@ -237,6 +243,7 @@ impl Model { | Self::FourPointOneNano | Self::Five | Self::FiveMini + | Self::FivePointOne | Self::FiveNano => true, Self::O1 | Self::O3 | Self::O3Mini | Self::O4Mini | Model::Custom { .. } => false, } From 6548eb74f19493f3a58c55ab4187d29c3c1f2958 Mon Sep 17 00:00:00 2001 From: John Tur Date: Tue, 25 Nov 2025 18:05:59 -0500 Subject: [PATCH 039/749] Upgrade `python-environment-tools` (#43496) Fixes https://github.com/zed-industries/zed/issues/42554 Fixes https://github.com/zed-industries/zed/issues/43383 Release Notes: - python: Added support for detecting uv workspaces as toolchains. - windows: Fixed console windows sometimes appearing when opening Python files. 
--- Cargo.lock | 61 +++++++++++++++++++++------------- Cargo.toml | 16 ++++----- crates/languages/src/python.rs | 4 +++ 3 files changed, 49 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 533839bdd306e1d9c1e75e75dd2b26b80257d534..38b7a6939878fad9bfa259ee03189e018ef507c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11530,7 +11530,7 @@ dependencies = [ [[package]] name = "pet" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "clap", "env_logger 0.10.2", @@ -11555,6 +11555,7 @@ dependencies = [ "pet-python-utils", "pet-reporter", "pet-telemetry", + "pet-uv", "pet-venv", "pet-virtualenv", "pet-virtualenvwrapper", @@ -11567,7 +11568,7 @@ dependencies = [ [[package]] name = "pet-conda" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "lazy_static", @@ -11586,7 +11587,7 @@ dependencies = [ [[package]] name = "pet-core" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "clap", "lazy_static", @@ -11601,7 +11602,7 @@ dependencies = [ [[package]] name = "pet-env-var-path" version = "0.1.0" -source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11617,7 +11618,7 @@ dependencies = [ [[package]] name = "pet-fs" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11626,7 +11627,7 @@ dependencies = [ [[package]] name = "pet-global-virtualenvs" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11639,7 +11640,7 @@ dependencies = [ [[package]] name = "pet-homebrew" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11657,7 +11658,7 @@ dependencies = [ [[package]] name = "pet-jsonrpc" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "log", @@ -11670,7 +11671,7 @@ dependencies = [ [[package]] name = "pet-linux-global-python" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11683,7 +11684,7 @@ dependencies = [ [[package]] name = "pet-mac-commandlinetools" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11696,7 +11697,7 @@ dependencies = [ [[package]] name = "pet-mac-python-org" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11709,7 +11710,7 @@ dependencies = [ [[package]] name = "pet-mac-xcode" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11722,7 +11723,7 @@ dependencies = [ [[package]] name = "pet-pipenv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11735,7 +11736,7 @@ dependencies = [ [[package]] name = "pet-pixi" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11747,7 +11748,7 @@ dependencies = [ [[package]] name = "pet-poetry" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "base64 0.22.1", "lazy_static", @@ -11768,7 +11769,7 @@ dependencies = [ [[package]] name = "pet-pyenv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ 
"lazy_static", "log", @@ -11786,7 +11787,7 @@ dependencies = [ [[package]] name = "pet-python-utils" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "lazy_static", @@ -11803,7 +11804,7 @@ dependencies = [ [[package]] name = "pet-reporter" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "log", @@ -11817,7 +11818,7 @@ dependencies = [ [[package]] name = "pet-telemetry" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "lazy_static", @@ -11829,10 +11830,22 @@ dependencies = [ "regex", ] +[[package]] +name = "pet-uv" +version = "0.1.0" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" +dependencies = [ + "log", + "pet-core", + "pet-python-utils", + "serde", + "toml 0.9.8", +] + [[package]] name = "pet-venv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11844,7 +11857,7 @@ dependencies = [ [[package]] name = "pet-virtualenv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11856,7 +11869,7 @@ dependencies = [ [[package]] name = "pet-virtualenvwrapper" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11869,7 +11882,7 @@ dependencies = [ [[package]] name = "pet-windows-registry" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11887,7 +11900,7 @@ dependencies = [ [[package]] name = "pet-windows-store" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" 
dependencies = [ "lazy_static", "log", diff --git a/Cargo.toml b/Cargo.toml index 4377b120450c8da185820ebb8c44a334ba8a3778..05ea7bceb818e33f8b550269f00c305ce6d7be0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -583,14 +583,14 @@ partial-json-fixer = "0.5.3" parse_int = "0.9" pciid-parser = "0.8.0" pathdiff = "0.2" -pet = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-conda = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-core = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-poetry = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-reporter = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-virtualenv = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } +pet = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-conda = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-core = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = 
"1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-poetry = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-reporter = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-virtualenv = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } portable-pty = "0.9.0" postage = { version = "0.5", features = ["futures-traits"] } pretty_assertions = { version = "1.3.0", features = ["unstable"] } diff --git a/crates/languages/src/python.rs b/crates/languages/src/python.rs index 03ce559b87bb5f318758735c5903bfc51b7c1267..a451afa6f1f3fcc5e4aa2135611fa37fa2f0f39e 100644 --- a/crates/languages/src/python.rs +++ b/crates/languages/src/python.rs @@ -991,6 +991,8 @@ fn python_env_kind_display(k: &PythonEnvironmentKind) -> &'static str { PythonEnvironmentKind::VirtualEnvWrapper => "virtualenvwrapper", PythonEnvironmentKind::WindowsStore => "global (Windows Store)", PythonEnvironmentKind::WindowsRegistry => "global (Windows Registry)", + PythonEnvironmentKind::Uv => "uv", + PythonEnvironmentKind::UvWorkspace => "uv (Workspace)", } } @@ -998,6 +1000,8 @@ pub(crate) struct PythonToolchainProvider; static ENV_PRIORITY_LIST: &[PythonEnvironmentKind] = &[ // Prioritize non-Conda environments. + PythonEnvironmentKind::UvWorkspace, + PythonEnvironmentKind::Uv, PythonEnvironmentKind::Poetry, PythonEnvironmentKind::Pipenv, PythonEnvironmentKind::VirtualEnvWrapper, From 98e369285bd8c38dba9f50ca77b2e0dcb86811c5 Mon Sep 17 00:00:00 2001 From: Peter Tripp Date: Tue, 25 Nov 2025 18:31:52 -0500 Subject: [PATCH 040/749] languages: Recognize .clang-format as YAML (#43469) Clang-Format uses a YAML config file format. Use YAML language by default for `.clang-format` and `_clang-format` filenames.
([source](https://clang.llvm.org/docs/ClangFormatStyleOptions.html)) Add `#yaml-language-server: $schema` to `.clang-format` example in C language docs. Release Notes: - Added support for identifying `.clang-format` files as YAML by default --- crates/languages/src/yaml/config.toml | 2 +- docs/src/languages/c.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/languages/src/yaml/config.toml b/crates/languages/src/yaml/config.toml index e941497bf3f3ea56e7c5188fde3e8708cd36bb8a..8834b3205af810c26fc9a8835f1c2afe7a185d8c 100644 --- a/crates/languages/src/yaml/config.toml +++ b/crates/languages/src/yaml/config.toml @@ -1,6 +1,6 @@ name = "YAML" grammar = "yaml" -path_suffixes = ["yml", "yaml", "pixi.lock"] +path_suffixes = ["yml", "yaml", "pixi.lock", "clang-format"] line_comments = ["# "] autoclose_before = ",]}" brackets = [ diff --git a/docs/src/languages/c.md b/docs/src/languages/c.md index 7f6e0ba6b2ed24dd958c02a7606e2a569d08f8f1..565b0b5acbef78a23722020dcbad9300748dbb16 100644 --- a/docs/src/languages/c.md +++ b/docs/src/languages/c.md @@ -27,9 +27,10 @@ By default clang and gcc will recognize `*.C` and `*.H` (uppercase extensions) a ## Formatting -By default Zed will use the `clangd` language server for formatting C code. The Clangd is the same as the `clang-format` CLI tool. To configure this you can add a `.clang-format` file. For example: +By default Zed will use the `clangd` language server for formatting C code like the `clang-format` CLI tool. To configure this you can add a `.clang-format` file.
For example: ```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clang-format-21.x.json --- BasedOnStyle: GNU IndentWidth: 2 From e13e93063ce24a2ede88747c316d7279174878c8 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 25 Nov 2025 17:33:10 -0800 Subject: [PATCH 041/749] Avoid continuing zeta requests that are cancelled before their throttle (#43505) Release Notes: - N/A --- crates/zeta/src/zeta.rs | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 8b54576a12f2ff788b6088299c30923b2ce8adda..26a2388a96e4a828fc4c7bd6fe5d3dbb57bfc911 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -289,6 +289,7 @@ struct ZetaProject { next_pending_prediction_id: usize, pending_predictions: ArrayVec, last_prediction_refresh: Option<(EntityId, Instant)>, + cancelled_predictions: HashSet, context: Option, Vec>>>, refresh_context_task: Option>>>, refresh_context_debounce_task: Option>>, @@ -601,6 +602,7 @@ impl Zeta { recent_paths: VecDeque::new(), registered_buffers: HashMap::default(), current_prediction: None, + cancelled_predictions: HashSet::default(), pending_predictions: ArrayVec::new(), next_pending_prediction_id: 0, last_prediction_refresh: None, @@ -1132,11 +1134,23 @@ impl Zeta { cx.background_executor().timer(timeout).await; } + // If this task was cancelled before the throttle timeout expired, + // do not perform a request. 
+ let mut is_cancelled = true; this.update(cx, |this, cx| { - this.get_or_init_zeta_project(&project, cx) - .last_prediction_refresh = Some((throttle_entity, Instant::now())); + let project_state = this.get_or_init_zeta_project(&project, cx); + if !project_state + .cancelled_predictions + .remove(&pending_prediction_id) + { + project_state.last_prediction_refresh = Some((throttle_entity, Instant::now())); + is_cancelled = false; + } }) .ok(); + if is_cancelled { + return None; + } let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); @@ -1144,6 +1158,10 @@ impl Zeta { // any pending predictions that were enqueued before it. this.update(cx, |this, cx| { let zeta_project = this.get_or_init_zeta_project(&project, cx); + zeta_project + .cancelled_predictions + .remove(&pending_prediction_id); + let mut pending_predictions = mem::take(&mut zeta_project.pending_predictions); for (ix, pending_prediction) in pending_predictions.iter().enumerate() { if pending_prediction.id == pending_prediction_id { @@ -1174,6 +1192,9 @@ impl Zeta { id: pending_prediction_id, task, }); + zeta_project + .cancelled_predictions + .insert(pending_prediction.id); self.cancel_pending_prediction(pending_prediction, cx); } } From 88ef5b137fc4cc8e9b194ef5b8a888f10872ce84 Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Tue, 25 Nov 2025 22:45:54 -0500 Subject: [PATCH 042/749] terminal: Update search match highlights on resize (#43507) The fix for this is emitting a wake-up event to tell the terminal to recalculate its search highlights on resize. Release Notes: - terminal: Fix bug where search match highlights wouldn't update their position when resizing the terminal. 
--- crates/terminal/src/terminal.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 69b6be5f249b811273aed8ecd96ed82493a3596a..48073aee51a376d3700a3f818081f87fd24c5ee1 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -992,6 +992,12 @@ impl Terminal { } term.resize(new_bounds); + // If there are matches we need to emit a wake up event to + // invalidate the matches and recalculate their locations + // in the new terminal layout + if !self.matches.is_empty() { + cx.emit(Event::Wakeup); + } } InternalEvent::Clear => { trace!("Clearing"); From 56a2f9cfcf0c6a3c38f596b58002953763cd890f Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Tue, 25 Nov 2025 22:58:29 -0500 Subject: [PATCH 043/749] Revert "git: Make the version_control.{deleted/added} colors more accessible" (#43512) Reverts zed-industries/zed#43475 The colors ended up being too dark. Zed adds an opacity to the highlights. https://github.com/zed-industries/zed/blob/e13e93063ce24a2ede88747c316d7279174878c8/crates/editor/src/element.rs#L9195-L9200 Reverting to avoid having the colors go out in preview will fix shortly after. 
--- assets/themes/one/one.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/themes/one/one.json b/assets/themes/one/one.json index 48db749a4b636963d6db714ddb055c9c15bc5494..6849cd05dc70752216789ae04e81fad232f7b14b 100644 --- a/assets/themes/one/one.json +++ b/assets/themes/one/one.json @@ -96,9 +96,9 @@ "terminal.ansi.bright_white": "#fafafaff", "terminal.ansi.dim_white": "#575d65ff", "link_text.hover": "#74ade8ff", - "version_control.added": "#2EA048ff", + "version_control.added": "#27a657ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#78081Bff", + "version_control.deleted": "#e06c76ff", "version_control.conflict_marker.ours": "#a1c1811a", "version_control.conflict_marker.theirs": "#74ade81a", "conflict": "#dec184ff", @@ -497,9 +497,9 @@ "terminal.ansi.bright_white": "#ffffffff", "terminal.ansi.dim_white": "#aaaaaaff", "link_text.hover": "#5c78e2ff", - "version_control.added": "#2EA048ff", + "version_control.added": "#27a657ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#F85149ff", + "version_control.deleted": "#e06c76ff", "conflict": "#a48819ff", "conflict.background": "#faf2e6ff", "conflict.border": "#f4e7d1ff", From 3072133e5965e16f956509a600faaef482a10195 Mon Sep 17 00:00:00 2001 From: qystishere Date: Wed, 26 Nov 2025 09:45:50 +0300 Subject: [PATCH 044/749] Improve bash detection on Windows (#43455) I have git installed via [scoop](https://scoop.sh). The current implementation finds `git.exe` in scoop's shims folder and then tries to find `bash.exe` relative to it. For example, `git.exe` (shim) is located at: ``` C:\Users\\scoop\shims\git.exe ``` And the code tries to find `bash.exe` at: ``` C:\Users\\scoop\shims\..\bin\bash.exe ``` which doesn't exist. This PR changes the logic to first check if `bash.exe` is available in PATH (using `which::which`), and only falls back to the git-relative path if that fails. 
--- crates/util/src/shell.rs | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/crates/util/src/shell.rs b/crates/util/src/shell.rs index ba54f7b7784b45613b28067afe2748339e6b6c64..1f91939134b67a745c75afe264ceec0ef5d50f73 100644 --- a/crates/util/src/shell.rs +++ b/crates/util/src/shell.rs @@ -79,29 +79,42 @@ pub fn get_default_system_shell() -> String { } } -/// Get the default system shell, preferring git-bash on Windows. +/// Get the default system shell, preferring bash on Windows. pub fn get_default_system_shell_preferring_bash() -> String { if cfg!(windows) { - get_windows_git_bash().unwrap_or_else(|| get_windows_system_shell()) + get_windows_bash().unwrap_or_else(|| get_windows_system_shell()) } else { "/bin/sh".to_string() } } -pub fn get_windows_git_bash() -> Option { - static GIT_BASH: LazyLock> = LazyLock::new(|| { +pub fn get_windows_bash() -> Option { + use std::path::PathBuf; + + fn find_bash_in_scoop() -> Option { + let bash_exe = + PathBuf::from(std::env::var_os("USERPROFILE")?).join("scoop\\shims\\bash.exe"); + bash_exe.exists().then_some(bash_exe) + } + + fn find_bash_in_git() -> Option { // /path/to/git/cmd/git.exe/../../bin/bash.exe let git = which::which("git").ok()?; let git_bash = git.parent()?.parent()?.join("bin").join("bash.exe"); - if git_bash.is_file() { - log::info!("Found git-bash at {}", git_bash.display()); - Some(git_bash.to_string_lossy().to_string()) - } else { - None + git_bash.exists().then_some(git_bash) + } + + static BASH: LazyLock> = LazyLock::new(|| { + let bash = find_bash_in_scoop() + .or_else(|| find_bash_in_git()) + .map(|p| p.to_string_lossy().into_owned()); + if let Some(ref path) = bash { + log::info!("Found bash at {}", path); } + bash }); - (*GIT_BASH).clone() + (*BASH).clone() } pub fn get_windows_system_shell() -> String { From 9d8b5077b4fcc1c3ff65ed807de34cc773e59d54 Mon Sep 17 00:00:00 2001 From: Oscar Vargas Torres <1676245+oscarvarto@users.noreply.github.com> 
Date: Wed, 26 Nov 2025 00:48:06 -0600 Subject: [PATCH 045/749] zeta: Avoid logging an error for not having SWEEP_AI_TOKEN (#43504) Closes #43503 Release Notes: - Fixes ERROR No SWEEP_AI_TOKEN environment variable set Co-authored-by: oscarvarto --- crates/zeta/src/sweep_ai.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/zeta/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs index 49870c7c9e917468e70062cbc234e9774fb3668b..c88dda2ae2fd11dd37965e58560df9e98528c9d9 100644 --- a/crates/zeta/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -1,4 +1,4 @@ -use anyhow::{Context as _, Result}; +use anyhow::Result; use cloud_llm_client::predict_edits_v3::Event; use futures::AsyncReadExt as _; use gpui::{ @@ -17,7 +17,6 @@ use std::{ sync::Arc, time::Instant, }; -use util::ResultExt as _; use crate::{EditPrediction, EditPredictionId, EditPredictionInputs}; @@ -31,9 +30,7 @@ pub struct SweepAi { impl SweepAi { pub fn new(cx: &App) -> Self { SweepAi { - api_token: std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(), + api_token: std::env::var("SWEEP_AI_TOKEN").ok(), debug_info: debug_info(cx), } } From 00e93bfa113a3daed6e4a97a7244ad04d58453ee Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 09:00:46 +0100 Subject: [PATCH 046/749] shell: Correctly identify `powershell` shells on windows (#43526) Release Notes: - Fixed zed only finding pwsh but not powershell on windows --- crates/askpass/src/askpass.rs | 1 + crates/gpui/src/platform/windows/platform.rs | 11 +++++---- crates/util/src/shell.rs | 24 +++++++++++++------- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index 25db3144ccb10b9cac1b8d8555ea9924e193468c..a9047a567fd3b6323fb6edc64be4854f4da0a958 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -250,6 +250,7 @@ impl PasswordProxy { .await .with_context(||
format!("creating askpass script at {askpass_script_path:?}"))?; make_file_executable(&askpass_script_path).await?; + // todo(shell): There might be no powershell on the system #[cfg(target_os = "windows")] let askpass_helper = format!( "powershell.exe -ExecutionPolicy Bypass -File {}", diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index b7f13f1fab495b1040d1be8e7b86376c450b5f7e..006099c3828efb11b0981e81635fba0c452c8560 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -389,11 +389,12 @@ impl Platform for WindowsPlatform { #[allow( clippy::disallowed_methods, reason = "We are restarting ourselves, using std command thus is fine" - )] - let restart_process = util::command::new_std_command("powershell.exe") - .arg("-command") - .arg(script) - .spawn(); + )] // todo(shell): There might be no powershell on the system + let restart_process = + util::command::new_std_command(util::shell::get_windows_system_shell()) + .arg("-command") + .arg(script) + .spawn(); match restart_process { Ok(_) => self.quit(), diff --git a/crates/util/src/shell.rs b/crates/util/src/shell.rs index 1f91939134b67a745c75afe264ceec0ef5d50f73..d6cf5e1d380109aa4fcfc4e55a4c469ba1903add 100644 --- a/crates/util/src/shell.rs +++ b/crates/util/src/shell.rs @@ -204,14 +204,22 @@ pub fn get_windows_system_shell() -> String { } static SYSTEM_SHELL: LazyLock = LazyLock::new(|| { - find_pwsh_in_programfiles(false, false) - .or_else(|| find_pwsh_in_programfiles(true, false)) - .or_else(|| find_pwsh_in_msix(false)) - .or_else(|| find_pwsh_in_programfiles(false, true)) - .or_else(|| find_pwsh_in_msix(true)) - .or_else(|| find_pwsh_in_programfiles(true, true)) - .or_else(find_pwsh_in_scoop) - .map(|p| p.to_string_lossy().into_owned()) + let locations = [ + || find_pwsh_in_programfiles(false, false), + || find_pwsh_in_programfiles(true, false), + || find_pwsh_in_msix(false), + || 
find_pwsh_in_programfiles(false, true), + || find_pwsh_in_msix(true), + || find_pwsh_in_programfiles(true, true), + || find_pwsh_in_scoop(), + || which::which_global("pwsh.exe").ok(), + || which::which_global("powershell.exe").ok(), + ]; + + locations + .into_iter() + .find_map(|f| f()) + .map(|p| p.to_string_lossy().trim().to_owned()) .inspect(|shell| log::info!("Found powershell in: {}", shell)) .unwrap_or_else(|| { log::warn!("Powershell not found, falling back to `cmd`"); From 425d4c73f3fbf4362520d2ea2c1205eeb948b31d Mon Sep 17 00:00:00 2001 From: Bhuminjay Soni Date: Wed, 26 Nov 2025 14:31:20 +0530 Subject: [PATCH 047/749] git: Use correct file mode when staging (#41900) Closes #28667 Release Notes: - Fixed git not preserving file mode when committing. Now if an input file is executable it will be preserved when committed with Zed. --------- Signed-off-by: 11happy Signed-off-by: 11happy Co-authored-by: Jakub Konka --- Cargo.lock | 10 ++++ crates/fs/Cargo.toml | 1 + crates/fs/src/fake_git_repo.rs | 1 + crates/fs/src/fs.rs | 11 ++++ crates/git/src/repository.rs | 6 +- crates/project/src/git_store.rs | 15 ++++- crates/project/src/project_tests.rs | 85 +++++++++++++++++++++++++++++ 7 files changed, 126 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38b7a6939878fad9bfa259ee03189e018ef507c9..3c19e1b79bc859e77c99b992f4f5617894da82df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6406,6 +6406,7 @@ dependencies = [ "git", "gpui", "ignore", + "is_executable", "libc", "log", "notify 8.2.0", @@ -8436,6 +8437,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "is_executable" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baabb8b4867b26294d818bf3f651a454b6901431711abb96e296245888d6e8c4" +dependencies = [ + "windows-sys 0.60.2", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" diff --git a/crates/fs/Cargo.toml b/crates/fs/Cargo.toml index 
15093b3a5b5e18ce0ddca1e9c23350e1ac46d66e..52063eeddcc3aa74adae33f3a78c74ecb6b6f04c 100644 --- a/crates/fs/Cargo.toml +++ b/crates/fs/Cargo.toml @@ -33,6 +33,7 @@ tempfile.workspace = true text.workspace = true time.workspace = true util.workspace = true +is_executable = "1.0.5" [target.'cfg(target_os = "macos")'.dependencies] fsevent.workspace = true diff --git a/crates/fs/src/fake_git_repo.rs b/crates/fs/src/fake_git_repo.rs index c9a41243aa641318026db208d78a64429cfeb1ab..febef94d8cd8c5f10c27dd5c62e8076fb5fb784d 100644 --- a/crates/fs/src/fake_git_repo.rs +++ b/crates/fs/src/fake_git_repo.rs @@ -138,6 +138,7 @@ impl GitRepository for FakeGitRepository { path: RepoPath, content: Option, _env: Arc>, + _is_executable: bool, ) -> BoxFuture<'_, anyhow::Result<()>> { self.with_state_async(true, move |state| { if let Some(message) = &state.simulated_index_write_error_message { diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 93192ecd2bd2449dafa622a69045be6811a43cf7..5a6e4bdfdba48af25342d4d1ecfafd1d4ce0709b 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -32,6 +32,7 @@ use std::mem::MaybeUninit; use async_tar::Archive; use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture}; use git::repository::{GitRepository, RealGitRepository}; +use is_executable::IsExecutable; use rope::Rope; use serde::{Deserialize, Serialize}; use smol::io::AsyncWriteExt; @@ -208,6 +209,7 @@ pub struct Metadata { pub is_dir: bool, pub len: u64, pub is_fifo: bool, + pub is_executable: bool, } /// Filesystem modification time. 
The purpose of this newtype is to discourage use of operations @@ -895,6 +897,12 @@ impl Fs for RealFs { #[cfg(unix)] let is_fifo = metadata.file_type().is_fifo(); + let path_buf = path.to_path_buf(); + let is_executable = self + .executor + .spawn(async move { path_buf.is_executable() }) + .await; + Ok(Some(Metadata { inode, mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)), @@ -902,6 +910,7 @@ impl Fs for RealFs { is_symlink, is_dir: metadata.file_type().is_dir(), is_fifo, + is_executable, })) } @@ -2602,6 +2611,7 @@ impl Fs for FakeFs { is_dir: false, is_symlink, is_fifo: false, + is_executable: false, }, FakeFsEntry::Dir { inode, mtime, len, .. @@ -2612,6 +2622,7 @@ impl Fs for FakeFs { is_dir: true, is_symlink, is_fifo: false, + is_executable: false, }, FakeFsEntry::Symlink { .. } => unreachable!(), })) diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index 9beb3d838382d9267afdb081211647139f85b75e..03b29eb4a7a28ddc13bdbfb23422f98baa82ae36 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -400,6 +400,7 @@ pub trait GitRepository: Send + Sync { path: RepoPath, content: Option, env: Arc>, + is_executable: bool, ) -> BoxFuture<'_, anyhow::Result<()>>; /// Returns the URL of the remote with the given name. 
@@ -987,12 +988,15 @@ impl GitRepository for RealGitRepository { path: RepoPath, content: Option, env: Arc>, + is_executable: bool, ) -> BoxFuture<'_, anyhow::Result<()>> { let working_directory = self.working_directory(); let git_binary_path = self.any_git_binary_path.clone(); self.executor .spawn(async move { let working_directory = working_directory?; + let mode = if is_executable { "100755" } else { "100644" }; + if let Some(content) = content { let mut child = new_smol_command(&git_binary_path) .current_dir(&working_directory) @@ -1013,7 +1017,7 @@ impl GitRepository for RealGitRepository { let output = new_smol_command(&git_binary_path) .current_dir(&working_directory) .envs(env.iter()) - .args(["update-index", "--add", "--cacheinfo", "100644", sha]) + .args(["update-index", "--add", "--cacheinfo", mode, sha]) .arg(path.as_unix_str()) .output() .await?; diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index bde9261fa28b8ed0d6c6a79fd02b90177e52a98e..8b83fa48e9b61a7200a001f4d42227b1c2302874 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -301,6 +301,7 @@ impl std::ops::Deref for Repository { #[derive(Clone)] pub enum RepositoryState { Local { + fs: Arc, backend: Arc, environment: Arc>, }, @@ -4288,6 +4289,7 @@ impl Repository { RepositoryState::Local { backend, environment, + .. } => backend.run_hook(hook, environment.clone()).await, RepositoryState::Remote { project_id, client } => { client @@ -4580,6 +4582,7 @@ impl Repository { let id = self.id; let this = cx.weak_entity(); let git_store = self.git_store.clone(); + let abs_path = self.snapshot.repo_path_to_abs_path(&path); self.send_keyed_job( Some(GitJobKey::WriteIndex(vec![path.clone()])), None, @@ -4588,14 +4591,21 @@ impl Repository { "start updating index text for buffer {}", path.as_unix_str() ); + match git_repo { RepositoryState::Local { + fs, backend, environment, .. 
} => { + let executable = match fs.metadata(&abs_path).await { + Ok(Some(meta)) => meta.is_executable, + Ok(None) => false, + Err(_err) => false, + }; backend - .set_index_text(path.clone(), content, environment.clone()) + .set_index_text(path.clone(), content, environment.clone(), executable) .await?; } RepositoryState::Remote { project_id, client } => { @@ -5164,6 +5174,7 @@ impl Repository { cx: &mut Context, ) -> mpsc::UnboundedSender { let (job_tx, mut job_rx) = mpsc::unbounded::(); + let fs_cloned = fs.clone(); cx.spawn(async move |_, cx| { let environment = project_environment @@ -5195,8 +5206,8 @@ impl Repository { backend.clone(), ); } - let state = RepositoryState::Local { + fs: fs_cloned, backend, environment: Arc::new(environment), }; diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index d42859de5d5491d4a5388d311266e22962889f35..1cbaf950e818956f55cb52eed997c1e3819ced34 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -8174,6 +8174,91 @@ async fn test_single_file_diffs(cx: &mut gpui::TestAppContext) { }); } +// TODO: Should we test this on Windows also? 
+#[gpui::test] +#[cfg(not(windows))] +async fn test_staging_hunk_preserve_executable_permission(cx: &mut gpui::TestAppContext) { + use std::os::unix::fs::PermissionsExt; + init_test(cx); + cx.executor().allow_parking(); + let committed_contents = "bar\n"; + let file_contents = "baz\n"; + let root = TempTree::new(json!({ + "project": { + "foo": committed_contents + }, + })); + + let work_dir = root.path().join("project"); + let file_path = work_dir.join("foo"); + let repo = git_init(work_dir.as_path()); + let mut perms = std::fs::metadata(&file_path).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&file_path, perms).unwrap(); + git_add("foo", &repo); + git_commit("Initial commit", &repo); + std::fs::write(&file_path, file_contents).unwrap(); + + let project = Project::test( + Arc::new(RealFs::new(None, cx.executor())), + [root.path()], + cx, + ) + .await; + + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(file_path.as_path(), cx) + }) + .await + .unwrap(); + + let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot()); + + let uncommitted_diff = project + .update(cx, |project, cx| { + project.open_uncommitted_diff(buffer.clone(), cx) + }) + .await + .unwrap(); + + uncommitted_diff.update(cx, |diff, cx| { + let hunks = diff.hunks(&snapshot, cx).collect::>(); + diff.stage_or_unstage_hunks(true, &hunks, &snapshot, true, cx); + }); + + cx.run_until_parked(); + + let output = smol::process::Command::new("git") + .current_dir(&work_dir) + .args(["diff", "--staged"]) + .output() + .await + .unwrap(); + + let staged_diff = String::from_utf8_lossy(&output.stdout); + + assert!( + !staged_diff.contains("new mode 100644"), + "Staging should not change file mode from 755 to 644.\ngit diff --staged:\n{}", + staged_diff + ); + + let output = smol::process::Command::new("git") + .current_dir(&work_dir) + .args(["ls-files", "-s"]) + .output() + .await + .unwrap(); + let index_contents = 
String::from_utf8_lossy(&output.stdout); + + assert!( + index_contents.contains("100755"), + "Index should show file as executable (100755).\ngit ls-files -s:\n{}", + index_contents + ); +} + #[gpui::test] async fn test_repository_and_path_for_project_path( background_executor: BackgroundExecutor, From 9150346a43483eacc51a368d6c48257309c6186e Mon Sep 17 00:00:00 2001 From: Floyd Wang Date: Wed, 26 Nov 2025 17:03:52 +0800 Subject: [PATCH 048/749] outline_panel: Fix the panel frequent flickering during search (#43530) The outline panel flickers when searching or when the file content changes. This happens because an empty UI appears during the search process, but it only lasts for a few milliseconds, so we can safely ignore it. ## Before https://github.com/user-attachments/assets/9b409827-75ee-4a45-864a-58f0ca43191f ## After https://github.com/user-attachments/assets/b6d48143-1f1a-4811-8754-0a679428eec2 Release Notes: - N/A --- crates/outline_panel/src/outline_panel.rs | 63 +++++++++-------------- 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index cb857a72898bbd6f4161a0f4d218394efeab5c7e..1e649b2eb64fda970f845e9376be3f61944dde85 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -111,8 +111,6 @@ pub struct OutlinePanel { selected_entry: SelectedEntry, active_item: Option, _subscriptions: Vec, - updating_fs_entries: bool, - updating_cached_entries: bool, new_entries_for_fs_update: HashSet, fs_entries_update_task: Task<()>, cached_entries_update_task: Task<()>, @@ -853,8 +851,6 @@ impl OutlinePanel { width: None, active_item: None, pending_serialization: Task::ready(None), - updating_fs_entries: false, - updating_cached_entries: false, new_entries_for_fs_update: HashSet::default(), preserve_selection_on_buffer_fold_toggles: HashSet::default(), pending_default_expansion_depth: None, @@ -2658,7 +2654,6 @@ impl 
OutlinePanel { let repo_snapshots = self.project.update(cx, |project, cx| { project.git_store().read(cx).repo_snapshots(cx) }); - self.updating_fs_entries = true; self.fs_entries_update_task = cx.spawn_in(window, async move |outline_panel, cx| { if let Some(debounce) = debounce { cx.background_executor().timer(debounce).await; @@ -3016,7 +3011,6 @@ impl OutlinePanel { outline_panel .update_in(cx, |outline_panel, window, cx| { - outline_panel.updating_fs_entries = false; outline_panel.new_entries_for_fs_update.clear(); outline_panel.excerpts = new_excerpts; outline_panel.collapsed_entries = new_collapsed_entries; @@ -3579,7 +3573,6 @@ impl OutlinePanel { let is_singleton = self.is_singleton_active(cx); let query = self.query(cx); - self.updating_cached_entries = true; self.cached_entries_update_task = cx.spawn_in(window, async move |outline_panel, cx| { if let Some(debounce) = debounce { cx.background_executor().timer(debounce).await; @@ -3612,7 +3605,6 @@ impl OutlinePanel { } outline_panel.autoscroll(cx); - outline_panel.updating_cached_entries = false; cx.notify(); }) .ok(); @@ -4542,12 +4534,10 @@ impl OutlinePanel { cx: &mut Context, ) -> impl IntoElement { let contents = if self.cached_entries.is_empty() { - let header = if self.updating_fs_entries || self.updating_cached_entries { - None - } else if query.is_some() { - Some("No matches for query") + let header = if query.is_some() { + "No matches for query" } else { - Some("No outlines available") + "No outlines available" }; v_flex() @@ -4556,33 +4546,28 @@ impl OutlinePanel { .flex_1() .justify_center() .size_full() - .when_some(header, |panel, header| { - panel - .child(h_flex().justify_center().child(Label::new(header))) - .when_some(query.clone(), |panel, query| { - panel.child( - h_flex() - .px_0p5() - .justify_center() - .bg(cx.theme().colors().element_selected.opacity(0.2)) - .child(Label::new(query)), - ) - }) - .child(h_flex().justify_center().child({ - let keystroke = match self.position(window, 
cx) { - DockPosition::Left => { - window.keystroke_text_for(&workspace::ToggleLeftDock) - } - DockPosition::Bottom => { - window.keystroke_text_for(&workspace::ToggleBottomDock) - } - DockPosition::Right => { - window.keystroke_text_for(&workspace::ToggleRightDock) - } - }; - Label::new(format!("Toggle Panel With {keystroke}")).color(Color::Muted) - })) + .child(h_flex().justify_center().child(Label::new(header))) + .when_some(query, |panel, query| { + panel.child( + h_flex() + .px_0p5() + .justify_center() + .bg(cx.theme().colors().element_selected.opacity(0.2)) + .child(Label::new(query)), + ) }) + .child(h_flex().justify_center().child({ + let keystroke = match self.position(window, cx) { + DockPosition::Left => window.keystroke_text_for(&workspace::ToggleLeftDock), + DockPosition::Bottom => { + window.keystroke_text_for(&workspace::ToggleBottomDock) + } + DockPosition::Right => { + window.keystroke_text_for(&workspace::ToggleRightDock) + } + }; + Label::new(format!("Toggle Panel With {keystroke}")).color(Color::Muted) + })) } else { let list_contents = { let items_len = self.cached_entries.len(); From 684a58fc84491d21d5a034ac442f14f8f0bdb6d4 Mon Sep 17 00:00:00 2001 From: ihavecoke Date: Wed, 26 Nov 2025 17:09:26 +0800 Subject: [PATCH 049/749] Implement vertical scrolling for extended keymap load error information (#42542) This PR fix an issue where, if an error occurs while loading the keymap file during application startup, an excessively long error message would be truncated and not fully displayed. 
Before: before After: image Release Notes: - N/A --- crates/workspace/src/notifications.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs index 6c1156b83396d1266bc46bca67f10f3f57adfec4..75c35cda22d72d659040154a079fe78af78cf414 100644 --- a/crates/workspace/src/notifications.rs +++ b/crates/workspace/src/notifications.rs @@ -593,9 +593,9 @@ pub mod simple_message_notification { use gpui::{ AnyElement, DismissEvent, EventEmitter, FocusHandle, Focusable, ParentElement, Render, - SharedString, Styled, + ScrollHandle, SharedString, Styled, }; - use ui::prelude::*; + use ui::{WithScrollbar, prelude::*}; use crate::notifications::NotificationFrame; @@ -617,6 +617,7 @@ pub mod simple_message_notification { show_close_button: bool, show_suppress_button: bool, title: Option, + scroll_handle: ScrollHandle, } impl Focusable for MessageNotification { @@ -661,6 +662,7 @@ pub mod simple_message_notification { show_suppress_button: true, title: None, focus_handle: cx.focus_handle(), + scroll_handle: ScrollHandle::new(), } } @@ -777,7 +779,18 @@ pub mod simple_message_notification { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { NotificationFrame::new() .with_title(self.title.clone()) - .with_content((self.build_content)(window, cx)) + .with_content( + div() + .child( + div() + .id("message-notification-content") + .max_h(vh(0.6, window)) + .overflow_y_scroll() + .track_scroll(&self.scroll_handle.clone()) + .child((self.build_content)(window, cx)), + ) + .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + ) .show_close_button(self.show_close_button) .show_suppress_button(self.show_suppress_button) .on_close(cx.listener(|_, suppress, _, cx| { From c2cb76b026f6dc73d8b2b91b3c56be5a78a1e473 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 12:43:30 +0100 Subject: [PATCH 050/749] rope: Turn 
`ChunkSlice::slice` panics into error logs (#43538) While logically not really correct, its better than tearing down the application until we figure out the root cause here Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/rope/src/chunk.rs | 139 ++++++++++++++++++++------------------- crates/rope/src/rope.rs | 4 +- 2 files changed, 74 insertions(+), 69 deletions(-) diff --git a/crates/rope/src/chunk.rs b/crates/rope/src/chunk.rs index 7ada5c2052481408bc5af56740f8e35916623f14..95df79d64bb401edf6220ba573be854297226cfe 100644 --- a/crates/rope/src/chunk.rs +++ b/crates/rope/src/chunk.rs @@ -127,39 +127,15 @@ impl Chunk { #[track_caller] #[inline(always)] - pub fn assert_char_boundary(&self, offset: usize) { + pub fn assert_char_boundary(&self, offset: usize) -> bool { if self.is_char_boundary(offset) { - return; + return true; } - panic_char_boundary(self, offset); - - #[cold] - #[inline(never)] - #[track_caller] - fn panic_char_boundary(chunk: &Chunk, offset: usize) { - if offset > chunk.text.len() { - panic!( - "byte index {} is out of bounds of `{:?}` (length: {})", - offset, - chunk.text, - chunk.text.len() - ); - } - // find the character - let char_start = chunk.floor_char_boundary(offset); - // `char_start` must be less than len and a char boundary - let ch = chunk - .text - .get(char_start..) 
- .unwrap() - .chars() - .next() - .unwrap(); - let char_range = char_start..char_start + ch.len_utf8(); - panic!( - "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", - offset, ch, char_range, - ); + if PANIC { + panic_char_boundary(&self.text, offset); + } else { + log_err_char_boundary(&self.text, offset); + false } } } @@ -230,10 +206,7 @@ impl<'a> ChunkSlice<'a> { } #[inline(always)] - pub fn slice(self, range: Range) -> Self { - let mask = (1 as Bitmap) - .unbounded_shl(range.end as u32) - .wrapping_sub(1); + pub fn slice(self, mut range: Range) -> Self { if range.start == MAX_BASE { Self { chars: 0, @@ -243,8 +216,15 @@ impl<'a> ChunkSlice<'a> { text: "", } } else { - self.assert_char_boundary(range.start); - self.assert_char_boundary(range.end); + if !self.assert_char_boundary::(range.start) { + range.start = self.text.ceil_char_boundary(range.start); + } + if !self.assert_char_boundary::(range.end) { + range.end = self.text.floor_char_boundary(range.end); + } + let mask = (1 as Bitmap) + .unbounded_shl(range.end as u32) + .wrapping_sub(1); Self { chars: (self.chars & mask) >> range.start, chars_utf16: (self.chars_utf16 & mask) >> range.start, @@ -381,38 +361,15 @@ impl<'a> ChunkSlice<'a> { #[track_caller] #[inline(always)] - pub fn assert_char_boundary(&self, offset: usize) { + pub fn assert_char_boundary(&self, offset: usize) -> bool { if self.is_char_boundary(offset) { - return; + return true; } - panic_char_boundary(self, offset); - - #[cold] - #[inline(never)] - fn panic_char_boundary(chunk: &ChunkSlice, offset: usize) { - if offset > chunk.text.len() { - panic!( - "byte index {} is out of bounds of `{:?}` (length: {})", - offset, - chunk.text, - chunk.text.len() - ); - } - // find the character - let char_start = chunk.floor_char_boundary(offset); - // `char_start` must be less than len and a char boundary - let ch = chunk - .text - .get(char_start..) 
- .unwrap() - .chars() - .next() - .unwrap(); - let char_range = char_start..char_start + ch.len_utf8(); - panic!( - "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", - offset, ch, char_range, - ); + if PANIC { + panic_char_boundary(self.text, offset); + } else { + log_err_char_boundary(self.text, offset); + false } } @@ -696,6 +653,54 @@ fn nth_set_bit(v: u128, n: usize) -> usize { } } +#[cold] +#[inline(never)] +#[track_caller] +fn panic_char_boundary(text: &str, offset: usize) -> ! { + if offset > text.len() { + panic!( + "byte index {} is out of bounds of `{:?}` (length: {})", + offset, + text, + text.len() + ); + } + // find the character + let char_start = text.floor_char_boundary(offset); + // `char_start` must be less than len and a char boundary + let ch = text.get(char_start..).unwrap().chars().next().unwrap(); + let char_range = char_start..char_start + ch.len_utf8(); + panic!( + "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", + offset, ch, char_range, + ); +} + +#[cold] +#[inline(never)] +#[track_caller] +fn log_err_char_boundary(text: &str, offset: usize) { + if offset > text.len() { + log::error!( + "byte index {} is out of bounds of `{:?}` (length: {})", + offset, + text, + text.len() + ); + } + // find the character + let char_start = text.floor_char_boundary(offset); + // `char_start` must be less than len and a char boundary + let ch = text.get(char_start..).unwrap().chars().next().unwrap(); + let char_range = char_start..char_start + ch.len_utf8(); + log::error!( + "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", + offset, + ch, + char_range, + ); +} + #[inline(always)] fn nth_set_bit_u64(v: u64, mut n: u64) -> u64 { let v = v.reverse_bits(); diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index 32894fb84469287fb1474efc57d8180bdee13466..8379045be245cadaf79800f1d57ff418cdd24b40 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -58,7 +58,7 @@ 
impl Rope { match item { Some(chunk) => { let chunk_offset = offset - start; - chunk.assert_char_boundary(chunk_offset); + chunk.assert_char_boundary::(chunk_offset); } None => { panic!( @@ -716,7 +716,7 @@ impl<'a> Chunks<'a> { }; let chunk_offset = offset - chunks.start(); if let Some(chunk) = chunks.item() { - chunk.assert_char_boundary(chunk_offset); + chunk.assert_char_boundary::(chunk_offset); } Self { chunks, From b9af6645e3824a6bc94a5c2b051fb857c104bbe8 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 13:01:01 +0100 Subject: [PATCH 051/749] gpui: Return `None` for non-existing credentials in `read_credentials` on windows (#43540) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/gpui/src/platform/windows/platform.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 006099c3828efb11b0981e81635fba0c452c8560..942cb62d2216c8d7cd5ea4cf75c4e4fa4a7d007f 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -642,15 +642,24 @@ impl Platform for WindowsPlatform { .collect_vec(); self.foreground_executor().spawn(async move { let mut credentials: *mut CREDENTIALW = std::ptr::null_mut(); - unsafe { + let result = unsafe { CredReadW( PCWSTR::from_raw(target_name.as_ptr()), CRED_TYPE_GENERIC, None, &mut credentials, - )? + ) }; + if let Err(err) = result { + // ERROR_NOT_FOUND means the credential doesn't exist. + // Return Ok(None) to match macOS and Linux behavior. 
+ if err.code().0 == ERROR_NOT_FOUND.0 as i32 { + return Ok(None); + } + return Err(err.into()); + } + if credentials.is_null() { Ok(None) } else { From 1e6a05d0d87652c708d41e97f9d31af4a9cf6cd0 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 13:17:57 +0100 Subject: [PATCH 052/749] askpass: Quote askpass script in askpass helper command (#43542) Closes #40276 Release Notes: - Fixed askpass execution failing on windows sometimes --- crates/askpass/src/askpass.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index a9047a567fd3b6323fb6edc64be4854f4da0a958..8e911f1654572d2bcd9f906c82449e1524d0ce9d 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -249,11 +249,15 @@ impl PasswordProxy { fs::write(&askpass_script_path, askpass_script) .await .with_context(|| format!("creating askpass script at {askpass_script_path:?}"))?; - make_file_executable(&askpass_script_path).await?; + make_file_executable(&askpass_script_path) + .await + .with_context(|| { + format!("marking askpass script executable at {askpass_script_path:?}") + })?; // todo(shell): There might be no powershell on the system #[cfg(target_os = "windows")] let askpass_helper = format!( - "powershell.exe -ExecutionPolicy Bypass -File {}", + "powershell.exe -ExecutionPolicy Bypass -File '{}'", askpass_script_path.display() ); From 7c724c0f1049e610c541c2f4f6a8739f91865e02 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 26 Nov 2025 14:11:47 +0100 Subject: [PATCH 053/749] editor: Do not show scroll thumb if page fits (#43548) Follow-up to https://github.com/zed-industries/zed/pull/39367 Release Notes: - Fixed a small issue where a scrollbar would sometimes show in the editor although the content fix exactly on screen. 
--- crates/editor/src/element.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 4ea12f0a21295d97cdcff565c484750e14334223..c85528353fc23ac2da4cca3682e28a30cda37f9c 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -10712,9 +10712,9 @@ impl ScrollbarLayout { show_thumb: bool, axis: ScrollbarAxis, ) -> Self { - let text_units_per_page = f64::from(viewport_size / glyph_space); + let text_units_per_page = viewport_size.to_f64() / glyph_space.to_f64(); let visible_range = scroll_position..scroll_position + text_units_per_page; - let total_text_units = scroll_range / f64::from(glyph_space); + let total_text_units = scroll_range / glyph_space.to_f64(); let thumb_percentage = text_units_per_page / total_text_units; let thumb_size = Pixels::from(ScrollOffset::from(track_length) * thumb_percentage) From c36b12f3b241babfd84390e7dad6e54c945fbdd6 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 15:32:25 +0100 Subject: [PATCH 054/749] settings_ui: Pick a more reasonable minimum window size (#43556) Closes https://github.com/zed-industries/zed/issues/41903 Release Notes: - Fixed settings ui being forced larger than small screens --- crates/settings_ui/src/settings_ui.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 4f29945edb2e212e3638db60213dde082a41baf6..564e78dc57b8b27398d79f861b538a9cc9dbf21c 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -607,7 +607,10 @@ pub fn open_settings_editor( window_background: cx.theme().window_background_appearance(), app_id: Some(app_id.to_owned()), window_decorations: Some(window_decorations), - window_min_size: Some(scaled_bounds), + window_min_size: Some(gpui::Size { + width: px(360.0), + height: px(240.0), + }), window_bounds: 
Some(WindowBounds::centered(scaled_bounds, cx)), ..Default::default() }, From 51e97d343da40750c5bd43c09396457715b9fc59 Mon Sep 17 00:00:00 2001 From: Peter Tripp Date: Wed, 26 Nov 2025 09:55:23 -0500 Subject: [PATCH 055/749] languages: Recognize .clangd as YAML (#43557) Follow-up to: https://github.com/zed-industries/zed/pull/43469 Thanks @WeetHet for [the idea]([WeetHet](https://github.com/WeetHet)). Release Notes: - Added support for identifying. .clangd files as YAML by default --- crates/languages/src/yaml/config.toml | 2 +- docs/src/languages/c.md | 1 + docs/src/languages/cpp.md | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/languages/src/yaml/config.toml b/crates/languages/src/yaml/config.toml index 8834b3205af810c26fc9a8835f1c2afe7a185d8c..51e8e1224a40904e0dfbb0204eb531e6b2664825 100644 --- a/crates/languages/src/yaml/config.toml +++ b/crates/languages/src/yaml/config.toml @@ -1,6 +1,6 @@ name = "YAML" grammar = "yaml" -path_suffixes = ["yml", "yaml", "pixi.lock", "clang-format"] +path_suffixes = ["yml", "yaml", "pixi.lock", "clang-format", "clangd"] line_comments = ["# "] autoclose_before = ",]}" brackets = [ diff --git a/docs/src/languages/c.md b/docs/src/languages/c.md index 565b0b5acbef78a23722020dcbad9300748dbb16..2259ad21a4afa69390ef7ef15bfa4bb96cf44e1e 100644 --- a/docs/src/languages/c.md +++ b/docs/src/languages/c.md @@ -11,6 +11,7 @@ C support is available natively in Zed. Clangd out of the box assumes mixed C++/C projects. If you have a C-only project you may wish to instruct clangd to treat all files as C using the `-xc` flag. 
To do this, create a `.clangd` file in the root of your project with the following: ```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clangd.json CompileFlags: Add: [-xc] ``` diff --git a/docs/src/languages/cpp.md b/docs/src/languages/cpp.md index 36cdc7a9580d2de41a6eb7063d694d54c7caffa4..c20dd58335caca45a6923cc0527605d6cc4b5564 100644 --- a/docs/src/languages/cpp.md +++ b/docs/src/languages/cpp.md @@ -78,6 +78,7 @@ You can pass any number of arguments to clangd. To see a full set of available o By default Zed will use the `clangd` language server for formatting C++ code. The Clangd is the same as the `clang-format` CLI tool. To configure this you can add a `.clang-format` file. For example: ```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clang-format-21.x.json --- BasedOnStyle: LLVM IndentWidth: 4 @@ -106,7 +107,8 @@ You can trigger formatting via {#kb editor::Format} or the `editor: format` acti In the root of your project, it is generally common to create a `.clangd` file to set extra configuration. -```text +```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clangd.json CompileFlags: Add: - "--include-directory=/path/to/include" From 6a311cad113e261fae455cfa23ab069ab49411a2 Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Wed, 26 Nov 2025 16:46:17 +0100 Subject: [PATCH 056/749] Detail how to add symbols to samply's output (#43472) Release Notes: - N/A --- docs/src/performance.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/src/performance.md b/docs/src/performance.md index 9dff1d7f5ff0961d33169ee5c8761016d8fb7564..a04d7c5c342d4f0dfa506451d4b890bfdfd1013c 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -4,7 +4,13 @@ How to use our internal tools to profile and keep Zed fast. See what the CPU spends the most time on. Strongly recommend you use [samply](https://github.com/mstange/samply). It opens an interactive profile in -the browser. 
See its README on how to install and run. +the browser (specifically a local instance of [firefox_profiler](https://profiler.firefox.com/)). + +See [samply](https://github.com/mstange/samply)'s README on how to install and run. + +The profile.json does not contain any symbols. Firefox profiler can add the local symbols to the profile for for. To do that hit the upload local profile button in the top right corner. + +image # Task/Async profiling From 8aa53612fd194c787c74447c03b9fdefc31fec98 Mon Sep 17 00:00:00 2001 From: Remco Smits Date: Wed, 26 Nov 2025 16:55:12 +0100 Subject: [PATCH 057/749] agent_ui: Add support for deleting thread history (#43370) This PR adds support for deleting your entire thread history. This is inspired by a Zed user from the meetup in Amsterdam, he was missing this feature. **Demo** https://github.com/user-attachments/assets/5a195007-1094-4ec6-902a-1b83db5ec508 Release Notes: - AI: Add support for deleting your entire thread history --------- Co-authored-by: Danilo Leal --- crates/agent/src/db.rs | 16 ++++ crates/agent/src/history_store.rs | 9 +++ crates/agent_ui/src/acp/thread_history.rs | 95 ++++++++++++++++++++++- crates/agent_ui/src/agent_panel.rs | 16 ++-- crates/agent_ui/src/agent_ui.rs | 2 + 5 files changed, 127 insertions(+), 11 deletions(-) diff --git a/crates/agent/src/db.rs b/crates/agent/src/db.rs index 6b6312e48176c93fbfb12f97e26c7943c6cbf89a..d5166c5df931b6f7fad63769449aaa9784b5263f 100644 --- a/crates/agent/src/db.rs +++ b/crates/agent/src/db.rs @@ -424,4 +424,20 @@ impl ThreadsDatabase { Ok(()) }) } + + pub fn delete_threads(&self) -> Task> { + let connection = self.connection.clone(); + + self.executor.spawn(async move { + let connection = connection.lock(); + + let mut delete = connection.exec_bound::<()>(indoc! 
{" + DELETE FROM threads + "})?; + + delete(())?; + + Ok(()) + }) + } } diff --git a/crates/agent/src/history_store.rs b/crates/agent/src/history_store.rs index 3bfbd99677feed5db53d96d2fa96316ac49abce4..efc0e3966d30fbc8bc7857c9da0404ce7dd4201f 100644 --- a/crates/agent/src/history_store.rs +++ b/crates/agent/src/history_store.rs @@ -188,6 +188,15 @@ impl HistoryStore { }) } + pub fn delete_threads(&mut self, cx: &mut Context) -> Task> { + let database_future = ThreadsDatabase::connect(cx); + cx.spawn(async move |this, cx| { + let database = database_future.await.map_err(|err| anyhow!(err))?; + database.delete_threads().await?; + this.update(cx, |this, cx| this.reload(cx)) + }) + } + pub fn delete_text_thread( &mut self, path: Arc, diff --git a/crates/agent_ui/src/acp/thread_history.rs b/crates/agent_ui/src/acp/thread_history.rs index 11718c63475212fbe8b996b2f6edae8b4295c91a..29759093303a684fdfd9ad255d269516ed7a29b9 100644 --- a/crates/agent_ui/src/acp/thread_history.rs +++ b/crates/agent_ui/src/acp/thread_history.rs @@ -1,5 +1,5 @@ use crate::acp::AcpThreadView; -use crate::{AgentPanel, RemoveSelectedThread}; +use crate::{AgentPanel, RemoveHistory, RemoveSelectedThread}; use agent::{HistoryEntry, HistoryStore}; use chrono::{Datelike as _, Local, NaiveDate, TimeDelta}; use editor::{Editor, EditorEvent}; @@ -12,7 +12,7 @@ use std::{fmt::Display, ops::Range}; use text::Bias; use time::{OffsetDateTime, UtcOffset}; use ui::{ - HighlightedLabel, IconButtonShape, ListItem, ListItemSpacing, Tooltip, WithScrollbar, + HighlightedLabel, IconButtonShape, ListItem, ListItemSpacing, Tab, Tooltip, WithScrollbar, prelude::*, }; @@ -25,6 +25,7 @@ pub struct AcpThreadHistory { search_query: SharedString, visible_items: Vec, local_timezone: UtcOffset, + confirming_delete_history: bool, _update_task: Task<()>, _subscriptions: Vec, } @@ -98,6 +99,7 @@ impl AcpThreadHistory { ) .unwrap(), search_query: SharedString::default(), + confirming_delete_history: false, _subscriptions: 
vec![search_editor_subscription, history_store_subscription], _update_task: Task::ready(()), }; @@ -331,6 +333,24 @@ impl AcpThreadHistory { task.detach_and_log_err(cx); } + fn remove_history(&mut self, _window: &mut Window, cx: &mut Context) { + self.history_store.update(cx, |store, cx| { + store.delete_threads(cx).detach_and_log_err(cx) + }); + self.confirming_delete_history = false; + cx.notify(); + } + + fn prompt_delete_history(&mut self, _window: &mut Window, cx: &mut Context) { + self.confirming_delete_history = true; + cx.notify(); + } + + fn cancel_delete_history(&mut self, _window: &mut Window, cx: &mut Context) { + self.confirming_delete_history = false; + cx.notify(); + } + fn render_list_items( &mut self, range: Range, @@ -447,6 +467,8 @@ impl Focusable for AcpThreadHistory { impl Render for AcpThreadHistory { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { + let has_no_history = self.history_store.read(cx).is_empty(cx); + v_flex() .key_context("ThreadHistory") .size_full() @@ -457,9 +479,12 @@ impl Render for AcpThreadHistory { .on_action(cx.listener(Self::select_last)) .on_action(cx.listener(Self::confirm)) .on_action(cx.listener(Self::remove_selected_thread)) + .on_action(cx.listener(|this, _: &RemoveHistory, window, cx| { + this.remove_history(window, cx); + })) .child( h_flex() - .h(px(41.)) // Match the toolbar perfectly + .h(Tab::container_height(cx)) .w_full() .py_1() .px_2() @@ -481,7 +506,7 @@ impl Render for AcpThreadHistory { .overflow_hidden() .flex_grow(); - if self.history_store.read(cx).is_empty(cx) { + if has_no_history { view.justify_center().items_center().child( Label::new("You don't have any past threads yet.") .size(LabelSize::Small) @@ -512,6 +537,68 @@ impl Render for AcpThreadHistory { ) } }) + .when(!has_no_history, |this| { + this.child( + h_flex() + .p_2() + .border_t_1() + .border_color(cx.theme().colors().border_variant) + .when(!self.confirming_delete_history, |this| { + this.child( + 
Button::new("delete_history", "Delete All History") + .full_width() + .style(ButtonStyle::Outlined) + .label_size(LabelSize::Small) + .on_click(cx.listener(|this, _, window, cx| { + this.prompt_delete_history(window, cx); + })), + ) + }) + .when(self.confirming_delete_history, |this| { + this.w_full() + .gap_2() + .flex_wrap() + .justify_between() + .child( + h_flex() + .flex_wrap() + .gap_1() + .child( + Label::new("Delete all threads?") + .size(LabelSize::Small), + ) + .child( + Label::new("You won't be able to recover them later.") + .size(LabelSize::Small) + .color(Color::Muted), + ), + ) + .child( + h_flex() + .gap_1() + .child( + Button::new("cancel_delete", "Cancel") + .label_size(LabelSize::Small) + .on_click(cx.listener(|this, _, window, cx| { + this.cancel_delete_history(window, cx); + })), + ) + .child( + Button::new("confirm_delete", "Delete") + .style(ButtonStyle::Tinted(ui::TintColor::Error)) + .color(Color::Error) + .label_size(LabelSize::Small) + .on_click(cx.listener(|_, _, window, cx| { + window.dispatch_action( + Box::new(RemoveHistory), + cx, + ); + })), + ), + ) + }), + ) + }) } } diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 22eb11e24a8fd706c80aa65c3dcf5d8ae3876ddc..aa152018b180047815cc461d80e48dba0996b3cd 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -20,10 +20,9 @@ use zed_actions::agent::{OpenClaudeCodeOnboardingModal, ReauthenticateAgent}; use crate::ManageProfiles; use crate::ui::{AcpOnboardingModal, ClaudeCodeOnboardingModal}; use crate::{ - AddContextServer, AgentDiffPane, DeleteRecentlyOpenThread, Follow, InlineAssistant, - NewTextThread, NewThread, OpenActiveThreadAsMarkdown, OpenAgentDiff, OpenHistory, - ResetTrialEndUpsell, ResetTrialUpsell, ToggleNavigationMenu, ToggleNewThreadMenu, - ToggleOptionsMenu, + AddContextServer, AgentDiffPane, Follow, InlineAssistant, NewTextThread, NewThread, + OpenActiveThreadAsMarkdown, OpenAgentDiff, 
OpenHistory, ResetTrialEndUpsell, ResetTrialUpsell, + ToggleNavigationMenu, ToggleNewThreadMenu, ToggleOptionsMenu, acp::AcpThreadView, agent_configuration::{AgentConfiguration, AssistantConfigurationEvent}, slash_command::SlashCommandCompletionProvider, @@ -614,11 +613,14 @@ impl AgentPanel { if let Some(panel) = panel.upgrade() { menu = Self::populate_recently_opened_menu_section(menu, panel, cx); } - menu.action("View All", Box::new(OpenHistory)) - .end_slot_action(DeleteRecentlyOpenThread.boxed_clone()) + + menu = menu + .action("View All", Box::new(OpenHistory)) .fixed_width(px(320.).into()) .keep_open_on_confirm(false) - .key_context("NavigationMenu") + .key_context("NavigationMenu"); + + menu }); weak_panel .update(cx, |panel, cx| { diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index ae4cb70d4af419184519afb53ab62849b8a0eab8..5f5682b7dcc90d2b779744ba353380987a5907a1 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -69,6 +69,8 @@ actions!( CycleModeSelector, /// Expands the message editor to full size. ExpandMessageEditor, + /// Removes all thread history. + RemoveHistory, /// Opens the conversation history view. OpenHistory, /// Adds a context server to the configuration. From 6fbbc899049f8116685b731a3778830d709044eb Mon Sep 17 00:00:00 2001 From: "Joseph T. 
Lyons" Date: Wed, 26 Nov 2025 10:59:13 -0500 Subject: [PATCH 058/749] Bump Zed to v0.216 (#43564) Release Notes: - N/A --- Cargo.lock | 2 +- crates/zed/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c19e1b79bc859e77c99b992f4f5617894da82df..f10c2e1d13210d67d16d584637c0fb7b71d61eec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21192,7 +21192,7 @@ dependencies = [ [[package]] name = "zed" -version = "0.215.0" +version = "0.216.0" dependencies = [ "acp_tools", "activity_indicator", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 470f1ea28a3663838080b7e7bf98f58215a0a8fc..9e6a6a0fbd10a7695270f2651418d9e2cdc31b4c 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -2,7 +2,7 @@ description = "The fast, collaborative code editor." edition.workspace = true name = "zed" -version = "0.215.0" +version = "0.216.0" publish.workspace = true license = "GPL-3.0-or-later" authors = ["Zed Team "] From 0713ddcabc1f691cf89be7d564e4a2afdfeeac17 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Wed, 26 Nov 2025 21:36:02 +0530 Subject: [PATCH 059/749] editor: Fix vertical scroll margin not accounting for file header height (#43521) Closes #43178 Release Notes: - Fixed vertical scroll margin not accounting for file header height Here's the before/after: With `{ "vertical_scroll_margin": 0 }` in `~/.config/zed/settings.json` https://github.com/user-attachments/assets/418c6d7f-de0f-4da6-a038-69927b1b8b88 --- crates/editor/src/editor_tests.rs | 84 +++++++++++++++++++++++++++++ crates/editor/src/scroll/actions.rs | 10 +++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index f68b15b6b258a5ab730a13af9d7ecc62763321ea..09c9083f29a57addbdd5ca01b162f4abc023d0d7 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -28198,3 +28198,87 @@ async fn 
test_multibuffer_selections_with_folding(cx: &mut TestAppContext) { 3 "}); } + +#[gpui::test] +async fn test_multibuffer_scroll_cursor_top_margin(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + + let (editor, cx) = cx.add_window_view(|window, cx| { + let multi_buffer = MultiBuffer::build_multi( + [ + ("1\n2\n3\n", vec![Point::row_range(0..3)]), + ("1\n2\n3\n4\n5\n6\n7\n8\n9\n", vec![Point::row_range(0..9)]), + ], + cx, + ); + Editor::new(EditorMode::full(), multi_buffer, None, window, cx) + }); + + let mut cx = EditorTestContext::for_editor_in(editor.clone(), cx).await; + + cx.assert_excerpts_with_selections(indoc! {" + [EXCERPT] + ˇ1 + 2 + 3 + [EXCERPT] + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + "}); + + cx.update_editor(|editor, window, cx| { + editor.change_selections(None.into(), window, cx, |s| { + s.select_ranges([MultiBufferOffset(19)..MultiBufferOffset(19)]); + }); + }); + + cx.assert_excerpts_with_selections(indoc! {" + [EXCERPT] + 1 + 2 + 3 + [EXCERPT] + 1 + 2 + 3 + 4 + 5 + 6 + ˇ7 + 8 + 9 + "}); + + cx.update_editor(|editor, _window, cx| { + editor.set_vertical_scroll_margin(0, cx); + }); + + cx.update_editor(|editor, window, cx| { + assert_eq!(editor.vertical_scroll_margin(), 0); + editor.scroll_cursor_top(&ScrollCursorTop, window, cx); + assert_eq!( + editor.snapshot(window, cx).scroll_position(), + gpui::Point::new(0., 12.0) + ); + }); + + cx.update_editor(|editor, _window, cx| { + editor.set_vertical_scroll_margin(3, cx); + }); + + cx.update_editor(|editor, window, cx| { + assert_eq!(editor.vertical_scroll_margin(), 3); + editor.scroll_cursor_top(&ScrollCursorTop, window, cx); + assert_eq!( + editor.snapshot(window, cx).scroll_position(), + gpui::Point::new(0., 9.0) + ); + }); +} diff --git a/crates/editor/src/scroll/actions.rs b/crates/editor/src/scroll/actions.rs index 3b2ed55df724485ee72e6afbc02c7111817869fb..5a1c849b2438fe987b24481b824375e188468916 100644 --- a/crates/editor/src/scroll/actions.rs +++ b/crates/editor/src/scroll/actions.rs @@ 
-71,14 +71,20 @@ impl Editor { window: &mut Window, cx: &mut Context, ) { + let display_snapshot = self.display_snapshot(cx); let scroll_margin_rows = self.vertical_scroll_margin() as u32; let new_screen_top = self .selections - .newest_display(&self.display_snapshot(cx)) + .newest_display(&display_snapshot) .head() .row() .0; - let new_screen_top = new_screen_top.saturating_sub(scroll_margin_rows); + let header_offset = display_snapshot + .buffer_snapshot() + .show_headers() + .then(|| display_snapshot.buffer_header_height()) + .unwrap_or(0); + let new_screen_top = new_screen_top.saturating_sub(scroll_margin_rows + header_offset); self.set_scroll_top_row(DisplayRow(new_screen_top), window, cx); } From 5403e74bbd9688301ba44a953115a890196c27f6 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 26 Nov 2025 17:51:50 +0100 Subject: [PATCH 060/749] Add callable workflow to bump the version of an extension (#43566) This adds an intial workflow file that can be pulled in to create a bump commit for an extension version in an extension repository. 
Release Notes: - N/A --- .github/workflows/extension_bump.yml | 136 +++++++++++ tooling/xtask/src/tasks/workflows.rs | 2 + .../xtask/src/tasks/workflows/cherry_pick.rs | 22 +- .../xtask/src/tasks/workflows/compare_perf.rs | 18 +- .../src/tasks/workflows/extension_bump.rs | 217 ++++++++++++++++++ .../src/tasks/workflows/extension_tests.rs | 4 +- .../src/tasks/workflows/run_agent_evals.rs | 10 +- tooling/xtask/src/tasks/workflows/steps.rs | 12 +- tooling/xtask/src/tasks/workflows/vars.rs | 97 +++++++- 9 files changed, 485 insertions(+), 33 deletions(-) create mode 100644 .github/workflows/extension_bump.yml create mode 100644 tooling/xtask/src/tasks/workflows/extension_bump.rs diff --git a/.github/workflows/extension_bump.yml b/.github/workflows/extension_bump.yml new file mode 100644 index 0000000000000000000000000000000000000000..5933ab7fbb2fab753cbda729c82026102e395539 --- /dev/null +++ b/.github/workflows/extension_bump.yml @@ -0,0 +1,136 @@ +# Generated from xtask::workflows::extension_bump +# Rebuild with `cargo xtask workflows`. 
+name: extension_bump +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: '1' + CARGO_INCREMENTAL: '0' + ZED_EXTENSION_CLI_SHA: 7cfce605704d41ca247e3f84804bf323f6c6caaf +on: + workflow_call: + inputs: + bump-type: + description: bump-type + type: string + default: patch + secrets: + app-id: + description: The app ID used to create the PR + required: true + app-secret: + description: The app secret for the corresponding app ID + required: true +jobs: + check_extension: + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - id: cache-zed-extension-cli + name: extension_tests::cache_zed_extension_cli + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 + with: + path: zed-extension + key: zed-extension-${{ env.ZED_EXTENSION_CLI_SHA }} + - name: extension_tests::download_zed_extension_cli + if: steps.cache-zed-extension-cli.outputs.cache-hit != 'true' + run: | + wget --quiet "https://zed-extension-cli.nyc3.digitaloceanspaces.com/$ZED_EXTENSION_CLI_SHA/x86_64-unknown-linux-gnu/zed-extension" + chmod +x zed-extension + shell: bash -euxo pipefail {0} + - name: extension_tests::check + run: | + mkdir -p /tmp/ext-scratch + mkdir -p /tmp/ext-output + ./zed-extension --source-dir . 
--scratch-dir /tmp/ext-scratch --output-dir /tmp/ext-output + shell: bash -euxo pipefail {0} + timeout-minutes: 1 + check_bump_needed: + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + fetch-depth: 10 + - id: compare-versions-check + name: extension_bump::compare_versions + run: |+ + CURRENT_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + + git checkout "$(git log -1 --format=%H)"~1 + + PREV_COMMIT_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + + [[ "$CURRENT_VERSION" == "$PREV_COMMIT_VERSION" ]] && \ + echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ + echo "needs_bump=false" >> "$GITHUB_OUTPUT" + + shell: bash -euxo pipefail {0} + outputs: + needs_bump: ${{ steps.compare-versions-check.outputs.needs_bump }} + timeout-minutes: 1 + bump_extension_version: + needs: + - check_extension + - check_bump_needed + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && needs.check_bump_needed.outputs.needs_bump == 'true' + runs-on: namespace-profile-8x16-ubuntu-2204 + steps: + - id: generate-token + name: extension_bump::generate_token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.app-id }} + private-key: ${{ secrets.app-secret }} + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - name: extension_bump::install_bump_2_version + run: pip install bump2version + shell: bash -euxo pipefail {0} + - id: bump-version + name: extension_bump::bump_version + run: | + OLD_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + + cat < .bumpversion.cfg + [bumpversion] + current_version = "$OLD_VERSION" + + [bumpversion:file:Cargo.toml] + + 
[bumpversion:file:extension.toml] + + EOF + + bump2version --verbose ${{ inputs.bump-type }} + NEW_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + cargo update --workspace + + rm .bumpversion.cfg + + echo "old_version=${OLD_VERSION}" >> "$GITHUB_OUTPUT" + echo "new_version=${NEW_VERSION}" >> "$GITHUB_OUTPUT" + shell: bash -euxo pipefail {0} + - name: extension_bump::create_pull_request + uses: peter-evans/create-pull-request@v7 + with: + title: Bump version to ${{ steps.bump-version.outputs.new_version }} + body: This PR bumps the version of this extension to v${{ steps.bump-version.outputs.new_version }} + commit-message: Bump version to v${{ steps.bump-version.outputs.new_version }} + branch: bump-from-${{ steps.bump-version.outputs.old_version }} + committer: zed-zippy[bot] <234243425+zed-zippy[bot]@users.noreply.github.com> + base: main + delete-branch: true + token: ${{ steps.generate-token.outputs.token }} + sign-commits: true + timeout-minutes: 1 +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} + cancel-in-progress: true diff --git a/tooling/xtask/src/tasks/workflows.rs b/tooling/xtask/src/tasks/workflows.rs index c18eca52be8cf7fa369f46427c58b1d6b70e8bd0..334bc71e46fc5d7d99a9fab238dc60b874a1093d 100644 --- a/tooling/xtask/src/tasks/workflows.rs +++ b/tooling/xtask/src/tasks/workflows.rs @@ -7,6 +7,7 @@ mod after_release; mod cherry_pick; mod compare_perf; mod danger; +mod extension_bump; mod extension_tests; mod nix_build; mod release_nightly; @@ -44,6 +45,7 @@ pub fn run_workflows(_: GenerateWorkflowArgs) -> Result<()> { ("run_agent_evals.yml", run_agent_evals::run_agent_evals()), ("after_release.yml", after_release::after_release()), ("extension_tests.yml", extension_tests::extension_tests()), + ("extension_bump.yml", extension_bump::extension_bump()), ]; fs::create_dir_all(dir) .with_context(|| format!("Failed to create directory: {}", dir.display()))?; 
diff --git a/tooling/xtask/src/tasks/workflows/cherry_pick.rs b/tooling/xtask/src/tasks/workflows/cherry_pick.rs index 6181d79e042365b87ce2d6ad00a75580c71344a2..105bf74c4194a46ad4ca62991fae3a945eea150d 100644 --- a/tooling/xtask/src/tasks/workflows/cherry_pick.rs +++ b/tooling/xtask/src/tasks/workflows/cherry_pick.rs @@ -3,14 +3,14 @@ use gh_workflow::*; use crate::tasks::workflows::{ runners, steps::{self, NamedJob, named}, - vars::{self, Input, StepOutput}, + vars::{self, StepOutput, WorkflowInput}, }; pub fn cherry_pick() -> Workflow { - let branch = Input::string("branch", None); - let commit = Input::string("commit", None); - let channel = Input::string("channel", None); - let pr_number = Input::string("pr_number", None); + let branch = WorkflowInput::string("branch", None); + let commit = WorkflowInput::string("commit", None); + let channel = WorkflowInput::string("channel", None); + let pr_number = WorkflowInput::string("pr_number", None); let cherry_pick = run_cherry_pick(&branch, &commit, &channel); named::workflow() .run_name(format!("cherry_pick to {channel} #{pr_number}")) @@ -24,7 +24,11 @@ pub fn cherry_pick() -> Workflow { .add_job(cherry_pick.name, cherry_pick.job) } -fn run_cherry_pick(branch: &Input, commit: &Input, channel: &Input) -> NamedJob { +fn run_cherry_pick( + branch: &WorkflowInput, + commit: &WorkflowInput, + channel: &WorkflowInput, +) -> NamedJob { fn authenticate_as_zippy() -> (Step, StepOutput) { let step = named::uses( "actions", @@ -39,9 +43,9 @@ fn run_cherry_pick(branch: &Input, commit: &Input, channel: &Input) -> NamedJob } fn cherry_pick( - branch: &Input, - commit: &Input, - channel: &Input, + branch: &WorkflowInput, + commit: &WorkflowInput, + channel: &WorkflowInput, token: &StepOutput, ) -> Step { named::bash(&format!("./script/cherry-pick {branch} {commit} {channel}")) diff --git a/tooling/xtask/src/tasks/workflows/compare_perf.rs b/tooling/xtask/src/tasks/workflows/compare_perf.rs index 
db9f21de15fe159c369ad603e3ab0ff93e1cc7f9..1d111acc4f8a4dc47edea6f45c0b93c845b7cda2 100644 --- a/tooling/xtask/src/tasks/workflows/compare_perf.rs +++ b/tooling/xtask/src/tasks/workflows/compare_perf.rs @@ -5,13 +5,13 @@ use crate::tasks::workflows::steps::FluentBuilder; use crate::tasks::workflows::{ runners, steps::{self, NamedJob, named}, - vars::Input, + vars::WorkflowInput, }; pub fn compare_perf() -> Workflow { - let head = Input::string("head", None); - let base = Input::string("base", None); - let crate_name = Input::string("crate_name", Some("".to_owned())); + let head = WorkflowInput::string("head", None); + let base = WorkflowInput::string("base", None); + let crate_name = WorkflowInput::string("crate_name", Some("".to_owned())); let run_perf = run_perf(&base, &head, &crate_name); named::workflow() .on(Event::default().workflow_dispatch( @@ -23,8 +23,12 @@ pub fn compare_perf() -> Workflow { .add_job(run_perf.name, run_perf.job) } -pub fn run_perf(base: &Input, head: &Input, crate_name: &Input) -> NamedJob { - fn cargo_perf_test(ref_name: &Input, crate_name: &Input) -> Step { +pub fn run_perf( + base: &WorkflowInput, + head: &WorkflowInput, + crate_name: &WorkflowInput, +) -> NamedJob { + fn cargo_perf_test(ref_name: &WorkflowInput, crate_name: &WorkflowInput) -> Step { named::bash(&format!( " if [ -n \"{crate_name}\" ]; then @@ -39,7 +43,7 @@ pub fn run_perf(base: &Input, head: &Input, crate_name: &Input) -> NamedJob { named::uses("taiki-e", "install-action", "hyperfine") } - fn compare_runs(head: &Input, base: &Input) -> Step { + fn compare_runs(head: &WorkflowInput, base: &WorkflowInput) -> Step { named::bash(&format!( "cargo perf-compare --save=results.md {base} {head}" )) diff --git a/tooling/xtask/src/tasks/workflows/extension_bump.rs b/tooling/xtask/src/tasks/workflows/extension_bump.rs new file mode 100644 index 0000000000000000000000000000000000000000..66de1f86aa998269abc24f1de375dbe1800acc31 --- /dev/null +++ 
b/tooling/xtask/src/tasks/workflows/extension_bump.rs @@ -0,0 +1,217 @@ +use gh_workflow::*; +use indoc::indoc; + +use crate::tasks::workflows::{ + extension_tests::{self}, + runners, + steps::{self, CommonJobConditions, DEFAULT_REPOSITORY_OWNER_GUARD, NamedJob, named}, + vars::{ + JobOutput, StepOutput, WorkflowInput, WorkflowSecret, one_workflow_per_non_main_branch, + }, +}; + +const BUMPVERSION_CONFIG: &str = indoc! {r#" + [bumpversion] + current_version = "$OLD_VERSION" + + [bumpversion:file:Cargo.toml] + + [bumpversion:file:extension.toml] + "# +}; + +const VERSION_CHECK: &str = r#"sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml"#; + +// This is used by various extensions repos in the zed-extensions org to bump extension versions. +pub(crate) fn extension_bump() -> Workflow { + let bump_type = WorkflowInput::string("bump-type", Some("patch".to_owned())); + + let app_id = WorkflowSecret::new("app-id", "The app ID used to create the PR"); + let app_secret = + WorkflowSecret::new("app-secret", "The app secret for the corresponding app ID"); + + let test_extension = extension_tests::check_extension(); + let (check_bump_needed, needs_bump) = check_bump_needed(); + let bump_version = bump_extension_version( + &[&test_extension, &check_bump_needed], + &bump_type, + needs_bump.as_job_output(&check_bump_needed), + &app_id, + &app_secret, + ); + + named::workflow() + .add_event( + Event::default().workflow_call( + WorkflowCall::default() + .add_input(bump_type.name, bump_type.call_input()) + .secrets([ + (app_id.name.to_owned(), app_id.secret_configuration()), + ( + app_secret.name.to_owned(), + app_secret.secret_configuration(), + ), + ]), + ), + ) + .concurrency(one_workflow_per_non_main_branch()) + .add_env(("CARGO_TERM_COLOR", "always")) + .add_env(("RUST_BACKTRACE", 1)) + .add_env(("CARGO_INCREMENTAL", 0)) + .add_env(( + "ZED_EXTENSION_CLI_SHA", + extension_tests::ZED_EXTENSION_CLI_SHA, + )) + .add_job(test_extension.name, test_extension.job) + 
.add_job(check_bump_needed.name, check_bump_needed.job) + .add_job(bump_version.name, bump_version.job) +} + +fn check_bump_needed() -> (NamedJob, StepOutput) { + let (compare_versions, version_changed) = compare_versions(); + + let job = Job::default() + .with_repository_owner_guard() + .outputs([(version_changed.name.to_owned(), version_changed.to_string())]) + .runs_on(runners::LINUX_SMALL) + .timeout_minutes(1u32) + .add_step(steps::checkout_repo().add_with(("fetch-depth", 10))) + .add_step(compare_versions); + + (named::job(job), version_changed) +} + +/// Compares the current and previous commit and checks whether versions changed inbetween. +fn compare_versions() -> (Step, StepOutput) { + let check_needs_bump = named::bash(format!( + indoc! { + r#" + CURRENT_VERSION="$({})" + + git checkout "$(git log -1 --format=%H)"~1 + + PREV_COMMIT_VERSION="$({})" + + [[ "$CURRENT_VERSION" == "$PREV_COMMIT_VERSION" ]] && \ + echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ + echo "needs_bump=false" >> "$GITHUB_OUTPUT" + + "# + }, + VERSION_CHECK, VERSION_CHECK + )) + .id("compare-versions-check"); + + let needs_bump = StepOutput::new(&check_needs_bump, "needs_bump"); + + (check_needs_bump, needs_bump) +} + +fn bump_extension_version( + dependencies: &[&NamedJob], + bump_type: &WorkflowInput, + needs_bump: JobOutput, + app_id: &WorkflowSecret, + app_secret: &WorkflowSecret, +) -> NamedJob { + let (generate_token, generated_token) = generate_token(app_id, app_secret); + let (bump_version, old_version, new_version) = bump_version(bump_type); + + let job = steps::dependant_job(dependencies) + .cond(Expression::new(format!( + "{DEFAULT_REPOSITORY_OWNER_GUARD} && {} == 'true'", + needs_bump.expr(), + ))) + .runs_on(runners::LINUX_LARGE) + .timeout_minutes(1u32) + .add_step(generate_token) + .add_step(steps::checkout_repo()) + .add_step(install_bump_2_version()) + .add_step(bump_version) + .add_step(create_pull_request( + old_version, + new_version, + generated_token, + )); + + 
named::job(job) +} + +fn generate_token(app_id: &WorkflowSecret, app_secret: &WorkflowSecret) -> (Step, StepOutput) { + let step = named::uses("actions", "create-github-app-token", "v2") + .id("generate-token") + .add_with( + Input::default() + .add("app-id", app_id.to_string()) + .add("private-key", app_secret.to_string()), + ); + + let generated_token = StepOutput::new(&step, "token"); + + (step, generated_token) +} + +fn install_bump_2_version() -> Step { + named::run(runners::Platform::Linux, "pip install bump2version") +} + +fn bump_version(bump_type: &WorkflowInput) -> (Step, StepOutput, StepOutput) { + let step = named::bash(format!( + indoc! {r#" + OLD_VERSION="$({})" + + cat < .bumpversion.cfg + {} + EOF + + bump2version --verbose {} + NEW_VERSION="$({})" + cargo update --workspace + + rm .bumpversion.cfg + + echo "old_version=${{OLD_VERSION}}" >> "$GITHUB_OUTPUT" + echo "new_version=${{NEW_VERSION}}" >> "$GITHUB_OUTPUT" + "# + }, + VERSION_CHECK, BUMPVERSION_CONFIG, bump_type, VERSION_CHECK + )) + .id("bump-version"); + + let old_version = StepOutput::new(&step, "old_version"); + let new_version = StepOutput::new(&step, "new_version"); + (step, old_version, new_version) +} + +fn create_pull_request( + old_version: StepOutput, + new_version: StepOutput, + generated_token: StepOutput, +) -> Step { + let formatted_version = format!("v{}", new_version); + + named::uses("peter-evans", "create-pull-request", "v7").with( + Input::default() + .add("title", format!("Bump version to {}", new_version)) + .add( + "body", + format!( + "This PR bumps the version of this extension to {}", + formatted_version + ), + ) + .add( + "commit-message", + format!("Bump version to {}", formatted_version), + ) + .add("branch", format!("bump-from-{}", old_version)) + .add( + "committer", + "zed-zippy[bot] <234243425+zed-zippy[bot]@users.noreply.github.com>", + ) + .add("base", "main") + .add("delete-branch", true) + .add("token", generated_token.to_string()) + .add("sign-commits", 
true), + ) +} diff --git a/tooling/xtask/src/tasks/workflows/extension_tests.rs b/tooling/xtask/src/tasks/workflows/extension_tests.rs index 4ee094fd37608c2427037effac3a6afa182014ba..8ea1435292372e33d5f98d1b3a5d5db0582a6a46 100644 --- a/tooling/xtask/src/tasks/workflows/extension_tests.rs +++ b/tooling/xtask/src/tasks/workflows/extension_tests.rs @@ -9,7 +9,7 @@ use crate::tasks::workflows::{ }; const RUN_TESTS_INPUT: &str = "run_tests"; -const ZED_EXTENSION_CLI_SHA: &str = "7cfce605704d41ca247e3f84804bf323f6c6caaf"; +pub(crate) const ZED_EXTENSION_CLI_SHA: &str = "7cfce605704d41ca247e3f84804bf323f6c6caaf"; // This is used by various extensions repos in the zed-extensions org to run automated tests. pub(crate) fn extension_tests() -> Workflow { @@ -77,7 +77,7 @@ fn check_rust() -> NamedJob { named::job(job) } -fn check_extension() -> NamedJob { +pub(crate) fn check_extension() -> NamedJob { let (cache_download, cache_hit) = cache_zed_extension_cli(); let job = Job::default() .with_repository_owner_guard() diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index ec9b70a2db9049b62676b43d614818374e0930a1..220d3872f72326f42845622b5e3c61f4819f4550 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -3,12 +3,12 @@ use gh_workflow::{Event, Expression, Job, Run, Schedule, Step, Use, Workflow, Wo use crate::tasks::workflows::{ runners::{self, Platform}, steps::{self, FluentBuilder as _, NamedJob, named, setup_cargo_config}, - vars::{self, Input}, + vars::{self, WorkflowInput}, }; pub(crate) fn run_agent_evals() -> Workflow { let agent_evals = agent_evals(); - let model_name = Input::string("model_name", None); + let model_name = WorkflowInput::string("model_name", None); named::workflow() .on(Event::default().workflow_dispatch( @@ -29,8 +29,8 @@ pub(crate) fn run_agent_evals() -> Workflow { } pub(crate) fn run_unit_evals() -> 
Workflow { - let model_name = Input::string("model_name", None); - let commit_sha = Input::string("commit_sha", None); + let model_name = WorkflowInput::string("model_name", None); + let commit_sha = WorkflowInput::string("commit_sha", None); let unit_evals = named::job(unit_evals(Some(&commit_sha))); @@ -117,7 +117,7 @@ fn cron_unit_evals() -> NamedJob { named::job(unit_evals(None).add_step(send_failure_to_slack())) } -fn unit_evals(commit: Option<&Input>) -> Job { +fn unit_evals(commit: Option<&WorkflowInput>) -> Job { let script_step = add_api_keys(steps::script("./script/run-unit-evals")); Job::default() diff --git a/tooling/xtask/src/tasks/workflows/steps.rs b/tooling/xtask/src/tasks/workflows/steps.rs index 910b344cb7319e4f58911b3025632e560553716a..e20dafe18a660a0067708cc1e9d15d59572e5f53 100644 --- a/tooling/xtask/src/tasks/workflows/steps.rs +++ b/tooling/xtask/src/tasks/workflows/steps.rs @@ -142,9 +142,13 @@ pub struct NamedJob { // } // } +pub(crate) const DEFAULT_REPOSITORY_OWNER_GUARD: &str = + "(github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions')"; + pub fn repository_owner_guard_expression(trigger_always: bool) -> Expression { Expression::new(format!( - "(github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions'){}", + "{}{}", + DEFAULT_REPOSITORY_OWNER_GUARD, trigger_always.then_some(" && always()").unwrap_or_default() )) } @@ -248,8 +252,10 @@ pub mod named { /// Returns a bash-script step with the same name as the enclosing function. /// (You shouldn't inline this function into the workflow definition, you must /// wrap it in a new function.) - pub fn bash(script: &str) -> Step { - Step::new(function_name(1)).run(script).shell(BASH_SHELL) + pub fn bash(script: impl AsRef) -> Step { + Step::new(function_name(1)) + .run(script.as_ref()) + .shell(BASH_SHELL) } /// Returns a pwsh-script step with the same name as the enclosing function. 
diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index bbb98af757dd9b794ae7c57d6ddb4f1d3d10019d..8dae64a1ea10ca891d23cabb989f5073ddd1755d 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -1,6 +1,9 @@ use std::cell::RefCell; -use gh_workflow::{Concurrency, Env, Expression, Step, WorkflowDispatchInput}; +use gh_workflow::{ + Concurrency, Env, Expression, Step, WorkflowCallInput, WorkflowCallSecret, + WorkflowDispatchInput, +}; use crate::tasks::workflows::{runners::Platform, steps::NamedJob}; @@ -132,7 +135,7 @@ impl PathCondition { } pub(crate) struct StepOutput { - name: &'static str, + pub name: &'static str, step_id: String, } @@ -151,6 +154,13 @@ impl StepOutput { pub fn expr(&self) -> String { format!("steps.{}.outputs.{}", self.step_id, self.name) } + + pub fn as_job_output(self, job: &NamedJob) -> JobOutput { + JobOutput { + job_name: job.name.clone(), + name: self.name, + } + } } impl serde::Serialize for StepOutput { @@ -164,17 +174,43 @@ impl serde::Serialize for StepOutput { impl std::fmt::Display for StepOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "${{{{ steps.{}.outputs.{} }}}}", self.step_id, self.name) + write!(f, "${{{{ {} }}}}", self.expr()) + } +} + +pub(crate) struct JobOutput { + job_name: String, + name: &'static str, +} + +impl JobOutput { + pub fn expr(&self) -> String { + format!("needs.{}.outputs.{}", self.job_name, self.name) + } +} + +impl serde::Serialize for JobOutput { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl std::fmt::Display for JobOutput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "${{{{ {} }}}}", self.expr()) } } -pub struct Input { +pub struct WorkflowInput { pub input_type: &'static str, pub name: &'static str, pub default: Option, } -impl 
Input { +impl WorkflowInput { pub fn string(name: &'static str, default: Option) -> Self { Self { input_type: "string", @@ -191,15 +227,62 @@ impl Input { default: self.default.clone(), } } + + pub fn call_input(&self) -> WorkflowCallInput { + WorkflowCallInput { + description: self.name.to_owned(), + required: self.default.is_none(), + input_type: self.input_type.to_owned(), + default: self.default.clone(), + } + } } -impl std::fmt::Display for Input { +impl std::fmt::Display for WorkflowInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "${{{{ inputs.{} }}}}", self.name) } } -impl serde::Serialize for Input { +impl serde::Serialize for WorkflowInput { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +pub(crate) struct WorkflowSecret { + pub name: &'static str, + description: String, + required: bool, +} + +impl WorkflowSecret { + pub fn new(name: &'static str, description: impl ToString) -> Self { + Self { + name, + description: description.to_string(), + required: true, + } + } + + pub fn secret_configuration(&self) -> WorkflowCallSecret { + WorkflowCallSecret { + description: self.description.clone(), + required: self.required, + } + } +} + +impl std::fmt::Display for WorkflowSecret { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "${{{{ secrets.{} }}}}", self.name) + } +} + +impl serde::Serialize for WorkflowSecret { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, From 57e1bb810632c0858ea2e8fd6833c6ac261bdde1 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Wed, 26 Nov 2025 11:53:05 -0500 Subject: [PATCH 061/749] collab: Add `zed-zippy[bot]` to the `GET /contributor` endpoint (#43568) This PR adds the `zed-zippy[bot]` user to the `GET /contributor` endpoint so that it passes the CLA check. 
Release Notes: - N/A --- crates/collab/src/api/contributors.rs | 40 +++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/crates/collab/src/api/contributors.rs b/crates/collab/src/api/contributors.rs index 8cfef0ad7e717614e23c3cf9d04852c976f1f55f..574667c723dce62b905e3d2a0b34de1ca4c88c8e 100644 --- a/crates/collab/src/api/contributors.rs +++ b/crates/collab/src/api/contributors.rs @@ -64,6 +64,16 @@ async fn check_is_contributor( })); } + if ZedZippyBot::is_zed_zippy_bot(¶ms) { + return Ok(Json(CheckIsContributorResponse { + signed_at: Some( + ZedZippyBot::created_at() + .and_utc() + .to_rfc3339_opts(SecondsFormat::Millis, true), + ), + })); + } + Ok(Json(CheckIsContributorResponse { signed_at: app .db @@ -103,6 +113,36 @@ impl RenovateBot { } } +/// The Zed Zippy bot GitHub user (`zed-zippy[bot]`). +/// +/// https://api.github.com/users/zed-zippy[bot] +struct ZedZippyBot; + +impl ZedZippyBot { + const LOGIN: &'static str = "zed-zippy[bot]"; + const USER_ID: i32 = 234243425; + + /// Returns the `created_at` timestamp for the Zed Zippy bot user. + fn created_at() -> &'static NaiveDateTime { + static CREATED_AT: OnceLock = OnceLock::new(); + CREATED_AT.get_or_init(|| { + chrono::DateTime::parse_from_rfc3339("2025-09-24T17:00:11Z") + .expect("failed to parse 'created_at' for 'zed-zippy[bot]'") + .naive_utc() + }) + } + + /// Returns whether the given contributor selector corresponds to the Zed Zippy bot user. 
+ fn is_zed_zippy_bot(contributor: &ContributorSelector) -> bool { + match contributor { + ContributorSelector::GitHubLogin { github_login } => github_login == Self::LOGIN, + ContributorSelector::GitHubUserId { github_user_id } => { + github_user_id == &Self::USER_ID + } + } + } +} + #[derive(Debug, Deserialize)] struct AddContributorBody { github_user_id: i32, From 757c043171b95013fa5ffa5c29b2a9fae0b7fb90 Mon Sep 17 00:00:00 2001 From: Cole Miller Date: Wed, 26 Nov 2025 11:56:34 -0500 Subject: [PATCH 062/749] Fix git features not working when a Windows host collaborates with a unix guest (#43515) We were using `std::path::Path::strip_prefix` to determine which repository an absolute path belongs to, which doesn't work when the paths are Windows-style but the code is running on unix. Replace it with a platform-agnostic implementation of `strip_prefix`. Release Notes: - Fixed git features not working when a Windows host collaborates with a unix guest --- crates/agent_ui/src/acp/message_editor.rs | 2 +- crates/agent_ui/src/acp/thread_view.rs | 2 +- crates/file_finder/src/file_finder.rs | 4 +- crates/file_finder/src/file_finder_tests.rs | 2 +- crates/file_finder/src/open_path_prompt.rs | 12 +- crates/fuzzy/src/paths.rs | 2 +- crates/git_ui/src/git_panel.rs | 7 +- crates/project/src/git_store.rs | 6 +- crates/project/src/project.rs | 2 +- crates/project_panel/src/project_panel.rs | 2 +- crates/settings_ui/src/settings_ui.rs | 2 +- .../src/toolchain_selector.rs | 2 +- crates/util/src/paths.rs | 134 +++++++++++++++++- crates/vim/src/command.rs | 4 +- crates/worktree/src/worktree.rs | 6 +- 15 files changed, 159 insertions(+), 30 deletions(-) diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 169220a3614bf2d74d24a9638f87b9613a556bd6..facb86f3b87e746d35d8b91f27550e351b10e8b6 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -1423,7 +1423,7 @@ mod tests { 
rel_path("b/eight.txt"), ]; - let slash = PathStyle::local().separator(); + let slash = PathStyle::local().primary_separator(); let mut opened_editors = Vec::new(); for path in paths { diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 1c9e3f83e383658051f7799a7e3096f532addbe1..45b15e6e9e3eaa03fc69912eab3e778335b714d4 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -3989,7 +3989,7 @@ impl AcpThreadView { let file = buffer.read(cx).file()?; let path = file.path(); let path_style = file.path_style(cx); - let separator = file.path_style(cx).separator(); + let separator = file.path_style(cx).primary_separator(); let file_path = path.parent().and_then(|parent| { if parent.is_empty() { diff --git a/crates/file_finder/src/file_finder.rs b/crates/file_finder/src/file_finder.rs index 6f64dc20d0b97f1b12fb627c72209df555e6f1a7..050d7a45a1b46e94a195f88e49fd6795ce37f09f 100644 --- a/crates/file_finder/src/file_finder.rs +++ b/crates/file_finder/src/file_finder.rs @@ -1060,7 +1060,7 @@ impl FileFinderDelegate { ( filename.to_string(), Vec::new(), - prefix.display(path_style).to_string() + path_style.separator(), + prefix.display(path_style).to_string() + path_style.primary_separator(), Vec::new(), ) } else { @@ -1071,7 +1071,7 @@ impl FileFinderDelegate { .map_or(String::new(), |f| f.to_string_lossy().into_owned()), Vec::new(), entry_path.absolute.parent().map_or(String::new(), |path| { - path.to_string_lossy().into_owned() + path_style.separator() + path.to_string_lossy().into_owned() + path_style.primary_separator() }), Vec::new(), ) diff --git a/crates/file_finder/src/file_finder_tests.rs b/crates/file_finder/src/file_finder_tests.rs index d6971da15fde8406ac4d00fb613906c91e25d8d4..aeb9d794c2b4bc014bd332ed03dc8e5c3dda709b 100644 --- a/crates/file_finder/src/file_finder_tests.rs +++ b/crates/file_finder/src/file_finder_tests.rs @@ -1598,7 +1598,7 @@ async fn 
test_history_match_positions(cx: &mut gpui::TestAppContext) { assert_eq!(file_label.highlight_indices(), &[0, 1, 2]); assert_eq!( path_label.text(), - format!("test{}", PathStyle::local().separator()) + format!("test{}", PathStyle::local().primary_separator()) ); assert_eq!(path_label.highlight_indices(), &[] as &[usize]); }); diff --git a/crates/file_finder/src/open_path_prompt.rs b/crates/file_finder/src/open_path_prompt.rs index 53bad3b34880d69aba169df965db71f69b2296eb..2ae0c47776acb5c58b7d0919aa7522fb64d923d0 100644 --- a/crates/file_finder/src/open_path_prompt.rs +++ b/crates/file_finder/src/open_path_prompt.rs @@ -559,7 +559,7 @@ impl PickerDelegate for OpenPathDelegate { parent_path, candidate.path.string, if candidate.is_dir { - path_style.separator() + path_style.primary_separator() } else { "" } @@ -569,7 +569,7 @@ impl PickerDelegate for OpenPathDelegate { parent_path, candidate.path.string, if candidate.is_dir { - path_style.separator() + path_style.primary_separator() } else { "" } @@ -826,7 +826,13 @@ impl PickerDelegate for OpenPathDelegate { } fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - Arc::from(format!("[directory{}]filename.ext", self.path_style.separator()).as_str()) + Arc::from( + format!( + "[directory{}]filename.ext", + self.path_style.primary_separator() + ) + .as_str(), + ) } fn separators_after_indices(&self) -> Vec { diff --git a/crates/fuzzy/src/paths.rs b/crates/fuzzy/src/paths.rs index b35f0c1ce6cec73995838eb82bf782d00f0129af..cce0e082840c4cd05d6e2b21eac0073d3eb7700f 100644 --- a/crates/fuzzy/src/paths.rs +++ b/crates/fuzzy/src/paths.rs @@ -107,7 +107,7 @@ pub fn match_fixed_path_set( .display(path_style) .chars() .collect::>(); - path_prefix_chars.extend(path_style.separator().chars()); + path_prefix_chars.extend(path_style.primary_separator().chars()); let lowercase_pfx = path_prefix_chars .iter() .map(|c| c.to_ascii_lowercase()) diff --git a/crates/git_ui/src/git_panel.rs 
b/crates/git_ui/src/git_panel.rs index 4a5cd56ec90fd95fe94d55edfdeb7e2114fea820..1f66d194477c64fef207e63d4c87ad4d76675f65 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -4351,8 +4351,11 @@ impl GitPanel { .when(strikethrough, Label::strikethrough), ), (true, false) => this.child( - self.entry_label(format!("{dir}{}", path_style.separator()), path_color) - .when(strikethrough, Label::strikethrough), + self.entry_label( + format!("{dir}{}", path_style.primary_separator()), + path_color, + ) + .when(strikethrough, Label::strikethrough), ), _ => this, } diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 8b83fa48e9b61a7200a001f4d42227b1c2302874..e7a69c0e81464ac74d02bc8a552089ddcd7db039 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -3222,10 +3222,8 @@ impl RepositorySnapshot { abs_path: &Path, path_style: PathStyle, ) -> Option { - abs_path - .strip_prefix(&work_directory_abs_path) - .ok() - .and_then(|path| RepoPath::from_std_path(path, path_style).ok()) + let rel_path = path_style.strip_prefix(abs_path, work_directory_abs_path)?; + Some(RepoPath::from_rel_path(&rel_path)) } pub fn had_conflict_on_last_merge_head_change(&self, repo_path: &RepoPath) -> bool { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index beebf5a1d133eb75fdd98184ddf7880b9cedc7e0..afc854bceb59f88a496b6fcb99e840184277c894 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -927,7 +927,7 @@ impl DirectoryLister { .map(|worktree| worktree.read(cx).abs_path().to_string_lossy().into_owned()) .or_else(|| std::env::home_dir().map(|dir| dir.to_string_lossy().into_owned())) .map(|mut s| { - s.push_str(path_style.separator()); + s.push_str(path_style.primary_separator()); s }) .unwrap_or_else(|| { diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 
6a7036fce81eee5810dfbc41f57119efd22cfdca..cde0b89bb9115476744ed606f16174039db62cf6 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -4837,7 +4837,7 @@ impl ProjectPanel { .collect::>(); let active_index = folded_ancestors.active_index(); let components_len = components.len(); - let delimiter = SharedString::new(path_style.separator()); + let delimiter = SharedString::new(path_style.primary_separator()); for (index, component) in components.iter().enumerate() { if index != 0 { let delimiter_target_index = index - 1; diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 564e78dc57b8b27398d79f861b538a9cc9dbf21c..499d6b04653b06c41ef4e302cfd4b4e77efc95c9 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -2192,7 +2192,7 @@ impl SettingsWindow { format!( "{}{}{}", directory_name, - path_style.separator(), + path_style.primary_separator(), path.display(path_style) ) } diff --git a/crates/toolchain_selector/src/toolchain_selector.rs b/crates/toolchain_selector/src/toolchain_selector.rs index c017483a32325d13e85a5db34566a3b0bf6e15a5..96f692694dcf6b1adaa6494a4c1cbf6905c57c7c 100644 --- a/crates/toolchain_selector/src/toolchain_selector.rs +++ b/crates/toolchain_selector/src/toolchain_selector.rs @@ -876,7 +876,7 @@ impl ToolchainSelectorDelegate { .strip_prefix(&worktree_root) .ok() .and_then(|suffix| suffix.to_str()) - .map(|suffix| format!(".{}{suffix}", path_style.separator()).into()) + .map(|suffix| format!(".{}{suffix}", path_style.primary_separator()).into()) .unwrap_or(path) } } diff --git a/crates/util/src/paths.rs b/crates/util/src/paths.rs index 74929c6c831bcdb035756483ddbf9b2bc9ad444c..0834039e0e59ff4149614ad863bd7a07b4a2efd7 100644 --- a/crates/util/src/paths.rs +++ b/crates/util/src/paths.rs @@ -3,6 +3,7 @@ use globset::{Glob, GlobSet, GlobSetBuilder}; use itertools::Itertools; use regex::Regex; use 
serde::{Deserialize, Serialize}; +use std::borrow::Cow; use std::cmp::Ordering; use std::error::Error; use std::fmt::{Display, Formatter}; @@ -331,13 +332,20 @@ impl PathStyle { } #[inline] - pub fn separator(&self) -> &'static str { + pub fn primary_separator(&self) -> &'static str { match self { PathStyle::Posix => "/", PathStyle::Windows => "\\", } } + pub fn separators(&self) -> &'static [&'static str] { + match self { + PathStyle::Posix => &["/"], + PathStyle::Windows => &["\\", "/"], + } + } + pub fn is_windows(&self) -> bool { *self == PathStyle::Windows } @@ -353,25 +361,54 @@ impl PathStyle { } else { Some(format!( "{left}{}{right}", - if left.ends_with(self.separator()) { + if left.ends_with(self.primary_separator()) { "" } else { - self.separator() + self.primary_separator() } )) } } pub fn split(self, path_like: &str) -> (Option<&str>, &str) { - let Some(pos) = path_like.rfind(self.separator()) else { + let Some(pos) = path_like.rfind(self.primary_separator()) else { return (None, path_like); }; - let filename_start = pos + self.separator().len(); + let filename_start = pos + self.primary_separator().len(); ( Some(&path_like[..filename_start]), &path_like[filename_start..], ) } + + pub fn strip_prefix<'a>( + &self, + child: &'a Path, + parent: &'a Path, + ) -> Option> { + let parent = parent.to_str()?; + if parent.is_empty() { + return RelPath::new(child, *self).ok(); + } + let parent = self + .separators() + .iter() + .find_map(|sep| parent.strip_suffix(sep)) + .unwrap_or(parent); + let child = child.to_str()?; + let stripped = child.strip_prefix(parent)?; + if let Some(relative) = self + .separators() + .iter() + .find_map(|sep| stripped.strip_prefix(sep)) + { + RelPath::new(relative.as_ref(), *self).ok() + } else if stripped.is_empty() { + Some(Cow::Borrowed(RelPath::empty())) + } else { + None + } + } } #[derive(Debug, Clone)] @@ -788,7 +825,7 @@ impl PathMatcher { fn check_with_end_separator(&self, path: &Path) -> bool { let path_str = 
path.to_string_lossy(); - let separator = self.path_style.separator(); + let separator = self.path_style.primary_separator(); if path_str.ends_with(separator) { false } else { @@ -1311,6 +1348,8 @@ impl WslPath { #[cfg(test)] mod tests { + use crate::rel_path::rel_path; + use super::*; use util_macros::perf; @@ -2480,6 +2519,89 @@ mod tests { assert_eq!(strip_path_suffix(base, suffix), None); } + #[test] + fn test_strip_prefix() { + let expected = [ + ( + PathStyle::Posix, + "/a/b/c", + "/a/b", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Posix, + "/a/b/c", + "/a/b/", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Posix, + "/a/b/c", + "/", + Some(rel_path("a/b/c").into_arc()), + ), + (PathStyle::Posix, "/a/b/c", "", None), + (PathStyle::Posix, "/a/b//c", "/a/b/", None), + (PathStyle::Posix, "/a/bc", "/a/b", None), + ( + PathStyle::Posix, + "/a/b/c", + "/a/b/c", + Some(rel_path("").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b\\c", + "C:\\a\\b", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b\\c", + "C:\\a\\b\\", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b\\c", + "C:\\", + Some(rel_path("a/b/c").into_arc()), + ), + (PathStyle::Windows, "C:\\a\\b\\c", "", None), + (PathStyle::Windows, "C:\\a\\b\\\\c", "C:\\a\\b\\", None), + (PathStyle::Windows, "C:\\a\\bc", "C:\\a\\b", None), + ( + PathStyle::Windows, + "C:\\a\\b/c", + "C:\\a\\b", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b/c", + "C:\\a\\b\\", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b/c", + "C:\\a\\b/", + Some(rel_path("c").into_arc()), + ), + ]; + let actual = expected.clone().map(|(style, child, parent, _)| { + ( + style, + child, + parent, + style + .strip_prefix(child.as_ref(), parent.as_ref()) + .map(|rel_path| rel_path.into_arc()), + ) + }); + pretty_assertions::assert_eq!(actual, expected); + } + #[cfg(target_os = "windows")] #[test] fn test_wsl_path() 
{ diff --git a/crates/vim/src/command.rs b/crates/vim/src/command.rs index 70d0e93c5db5999878f2bb79c7fc42f16e6861a1..5bf0fca041cf274f38c84031e35903c9e339cc24 100644 --- a/crates/vim/src/command.rs +++ b/crates/vim/src/command.rs @@ -965,7 +965,7 @@ impl VimCommand { } }; - let rel_path = if args.ends_with(PathStyle::local().separator()) { + let rel_path = if args.ends_with(PathStyle::local().primary_separator()) { rel_path } else { rel_path @@ -998,7 +998,7 @@ impl VimCommand { .display(PathStyle::local()) .to_string(); if dir.is_dir { - path_string.push_str(PathStyle::local().separator()); + path_string.push_str(PathStyle::local().primary_separator()); } path_string }) diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 1e8c1648dca98b267146211a9b36fb78f743fb82..a62f1b3cd1305a4e396a9fb0dd6b2f3212a321b6 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -999,7 +999,7 @@ impl Worktree { }; if worktree_relative_path.components().next().is_some() { - full_path_string.push_str(self.path_style.separator()); + full_path_string.push_str(self.path_style.primary_separator()); full_path_string.push_str(&worktree_relative_path.display(self.path_style)); } @@ -2108,8 +2108,8 @@ impl Snapshot { if path.file_name().is_some() { let mut abs_path = self.abs_path.to_string(); for component in path.components() { - if !abs_path.ends_with(self.path_style.separator()) { - abs_path.push_str(self.path_style.separator()); + if !abs_path.ends_with(self.path_style.primary_separator()) { + abs_path.push_str(self.path_style.primary_separator()); } abs_path.push_str(component); } From 1a23115773ac3466444256992f661ee31cbaace2 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Thu, 27 Nov 2025 01:03:42 +0800 Subject: [PATCH 063/749] gpui: Unify `track_scroll` method to receive a reference type (#43518) Release Notes: - N/A This PR to change the `track_scroll` method to receive a reference type like the 
[Div#track_scroll](https://docs.rs/gpui/latest/gpui/trait.StatefulInteractiveElement.html#method.track_scroll), [Div#track_focus](https://docs.rs/gpui/latest/gpui/trait.InteractiveElement.html#method.track_focus). ```diff - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) ``` https://github.com/zed-industries/zed/blob/56a2f9cfcf0c6a3c38f596b58002953763cd890f/crates/gpui/src/elements/div.rs#L1088-L1093 https://github.com/zed-industries/zed/blob/56a2f9cfcf0c6a3c38f596b58002953763cd890f/crates/gpui/src/elements/div.rs#L613-L620 --- crates/acp_tools/src/acp_tools.rs | 2 +- crates/agent_ui/src/acp/thread_history.rs | 8 ++------ crates/agent_ui/src/acp/thread_view.rs | 2 +- crates/agent_ui/src/agent_configuration.rs | 2 +- .../src/agent_configuration/add_llm_provider_modal.rs | 2 +- .../configure_context_server_modal.rs | 5 ++--- .../configure_context_server_tools_modal.rs | 2 +- .../debugger_ui/src/session/running/breakpoint_list.rs | 4 ++-- crates/debugger_ui/src/session/running/memory_view.rs | 4 ++-- crates/debugger_ui/src/session/running/module_list.rs | 4 ++-- .../src/session/running/stack_frame_list.rs | 2 +- .../debugger_ui/src/session/running/variable_list.rs | 6 +++--- crates/editor/src/code_context_menus.rs | 6 +++--- crates/editor/src/hover_popover.rs | 4 ++-- crates/editor/src/signature_help.rs | 2 +- crates/extensions_ui/src/extensions_ui.rs | 4 ++-- crates/git_ui/src/git_panel.rs | 4 ++-- crates/gpui/examples/data_table.rs | 2 +- crates/gpui/src/elements/uniform_list.rs | 6 +++--- crates/language_tools/src/syntax_tree_view.rs | 4 ++-- crates/markdown/src/markdown.rs | 2 +- crates/markdown_preview/src/markdown_preview_view.rs | 2 +- crates/miniprofiler_ui/src/miniprofiler_ui.rs | 4 ++-- crates/onboarding/src/onboarding.rs | 2 +- crates/outline_panel/src/outline_panel.rs | 4 ++-- 
crates/picker/src/picker.rs | 6 +++--- crates/project_panel/src/project_panel.rs | 4 ++-- crates/recent_projects/src/remote_servers.rs | 2 +- crates/settings_ui/src/settings_ui.rs | 8 ++++---- crates/terminal_view/src/terminal_view.rs | 2 +- crates/ui/src/components/data_table.rs | 4 ++-- crates/ui/src/components/scrollbar.rs | 10 +++++----- crates/ui/src/components/tab_bar.rs | 4 ++-- crates/zed/src/zed/component_preview.rs | 2 +- 34 files changed, 63 insertions(+), 68 deletions(-) diff --git a/crates/acp_tools/src/acp_tools.rs b/crates/acp_tools/src/acp_tools.rs index 7615784676c7d9ff1782a6e9537e608cb927154d..0905effce38d1bfd4fa18e1d00169d6c7ef6c2d7 100644 --- a/crates/acp_tools/src/acp_tools.rs +++ b/crates/acp_tools/src/acp_tools.rs @@ -528,7 +528,7 @@ impl Render for AcpTools { .with_sizing_behavior(gpui::ListSizingBehavior::Auto) .size_full(), ) - .vertical_scrollbar_for(connection.list_state.clone(), window, cx) + .vertical_scrollbar_for(&connection.list_state, window, cx) .into_any() } } diff --git a/crates/agent_ui/src/acp/thread_history.rs b/crates/agent_ui/src/acp/thread_history.rs index 29759093303a684fdfd9ad255d269516ed7a29b9..e5c83d48f1fd4633591441ad88076e66d3eb1e62 100644 --- a/crates/agent_ui/src/acp/thread_history.rs +++ b/crates/agent_ui/src/acp/thread_history.rs @@ -527,14 +527,10 @@ impl Render for AcpThreadHistory { ) .p_1() .pr_4() - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .flex_grow(), ) - .vertical_scrollbar_for( - self.scroll_handle.clone(), - window, - cx, - ) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) } }) .when(!has_no_history, |this| { diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 45b15e6e9e3eaa03fc69912eab3e778335b714d4..a2929ad23ba8558b61abbf1d25ffe3843a918c2e 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -5896,7 +5896,7 @@ impl Render for AcpThreadView { .flex_grow() 
.into_any(), ) - .vertical_scrollbar_for(self.list_state.clone(), window, cx) + .vertical_scrollbar_for(&self.list_state, window, cx) .into_any() } else { this.child(self.render_recent_history(cx)).into_any() diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index ef6b90ad89e2e038e96d8864d4c2ce0ecf333d6e..f831329e2cde40dbb9d4b9e882d6bc942f383422 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -1209,7 +1209,7 @@ impl Render for AgentConfiguration { .child(self.render_context_servers_section(window, cx)) .child(self.render_provider_configuration_section(cx)), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) } } diff --git a/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs b/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs index 3427dab0d22c6900a3078f1dcb4cc7e892cce7db..02269511bb9a4d9b95fe27b66e3ca0a9e5c498c5 100644 --- a/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs +++ b/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs @@ -516,7 +516,7 @@ impl Render for AddLlmProviderModal { .child( div() .size_full() - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) .child( v_flex() .id("modal_content") diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs index a93df3839d98d95e2f91833078dbe96bc3fb8889..85f527ff5a1262aa36657316d86999ac617fb09d 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs @@ -821,7 +821,6 @@ impl ConfigureContextServerModal { impl Render for ConfigureContextServerModal { fn render(&mut self, window: &mut 
Window, cx: &mut Context) -> impl IntoElement { - let scroll_handle = self.scroll_handle.clone(); div() .elevation_3(cx) .w(rems(34.)) @@ -849,7 +848,7 @@ impl Render for ConfigureContextServerModal { .id("modal-content") .max_h(vh(0.7, window)) .overflow_y_scroll() - .track_scroll(&scroll_handle) + .track_scroll(&self.scroll_handle) .child(self.render_modal_description(window, cx)) .child(self.render_modal_content(cx)) .child(match &self.state { @@ -862,7 +861,7 @@ impl Render for ConfigureContextServerModal { } }), ) - .vertical_scrollbar_for(scroll_handle, window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ), ) .footer(self.render_modal_footer(cx)), diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs index 3fe0b8d1b1400b4362192261995ed5b6bd1cb662..3573c8b67ee81ef9cd1decacefb52017dabdb178 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs @@ -138,7 +138,7 @@ impl ConfigureContextServerToolsModal { items })), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) .into_any_element() } } diff --git a/crates/debugger_ui/src/session/running/breakpoint_list.rs b/crates/debugger_ui/src/session/running/breakpoint_list.rs index ca50f67c9236d19a9f04f327091eb383ab72e122..2c7e2074678290356b7669228dcf29008f1cc36b 100644 --- a/crates/debugger_ui/src/session/running/breakpoint_list.rs +++ b/crates/debugger_ui/src/session/running/breakpoint_list.rs @@ -575,7 +575,7 @@ impl BreakpointList { ) .with_horizontal_sizing_behavior(gpui::ListHorizontalSizingBehavior::Unconstrained) .with_width_from_item(self.max_width_index) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .flex_1() } @@ -776,7 +776,7 @@ impl Render 
for BreakpointList { .child(self.render_list(cx)) .custom_scrollbars( ui::Scrollbars::new(ScrollAxes::Both) - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle) .with_track_along(ScrollAxes::Both, cx.theme().colors().panel_background) .tracked_entity(cx.entity_id()), window, diff --git a/crates/debugger_ui/src/session/running/memory_view.rs b/crates/debugger_ui/src/session/running/memory_view.rs index 8670beb0f5f93f68a6052b868a866e22b82c92fd..55a8e8429eb23cd0bfcaa7d592d16797c061d2ae 100644 --- a/crates/debugger_ui/src/session/running/memory_view.rs +++ b/crates/debugger_ui/src/session/running/memory_view.rs @@ -229,7 +229,7 @@ impl MemoryView { rows }, ) - .track_scroll(view_state.scroll_handle) + .track_scroll(&view_state.scroll_handle) .with_horizontal_sizing_behavior(ListHorizontalSizingBehavior::Unconstrained) .on_scroll_wheel(cx.listener(|this, evt: &ScrollWheelEvent, window, _| { let mut view_state = this.view_state(); @@ -921,7 +921,7 @@ impl Render for MemoryView { })) .custom_scrollbars( ui::Scrollbars::new(ui::ScrollAxes::Both) - .tracked_scroll_handle(self.view_state_handle.clone()) + .tracked_scroll_handle(&self.view_state_handle) .with_track_along( ui::ScrollAxes::Both, cx.theme().colors().panel_background, diff --git a/crates/debugger_ui/src/session/running/module_list.rs b/crates/debugger_ui/src/session/running/module_list.rs index 545d8392745c636b805cfc1e0743170635ef8abe..19f407eb23f8acf0aa665f5119ecfd2156eb685f 100644 --- a/crates/debugger_ui/src/session/running/module_list.rs +++ b/crates/debugger_ui/src/session/running/module_list.rs @@ -253,7 +253,7 @@ impl ModuleList { range.map(|ix| this.render_entry(ix, cx)).collect() }), ) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .size_full() } } @@ -279,6 +279,6 @@ impl Render for ModuleList { .size_full() .p_1() .child(self.render_list(window, cx)) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + 
.vertical_scrollbar_for(&self.scroll_handle, window, cx) } } diff --git a/crates/debugger_ui/src/session/running/stack_frame_list.rs b/crates/debugger_ui/src/session/running/stack_frame_list.rs index a8fabd327a3de630ff884899fe7af1167932618c..96a910af4dd0ac901c6802c139ddd5b8b3d728bc 100644 --- a/crates/debugger_ui/src/session/running/stack_frame_list.rs +++ b/crates/debugger_ui/src/session/running/stack_frame_list.rs @@ -913,7 +913,7 @@ impl Render for StackFrameList { ) }) .child(self.render_list(window, cx)) - .vertical_scrollbar_for(self.list_state.clone(), window, cx) + .vertical_scrollbar_for(&self.list_state, window, cx) } } diff --git a/crates/debugger_ui/src/session/running/variable_list.rs b/crates/debugger_ui/src/session/running/variable_list.rs index 7d736aace58ab1b27ccab5690cf24d4cff9a47f6..1b455b59d7d12712a3d4adc713a6ed15e8166c6e 100644 --- a/crates/debugger_ui/src/session/running/variable_list.rs +++ b/crates/debugger_ui/src/session/running/variable_list.rs @@ -1557,7 +1557,7 @@ impl Render for VariableList { this.render_entries(range, window, cx) }), ) - .track_scroll(self.list_handle.clone()) + .track_scroll(&self.list_handle) .with_width_from_item(self.max_width_index) .with_sizing_behavior(gpui::ListSizingBehavior::Auto) .with_horizontal_sizing_behavior(gpui::ListHorizontalSizingBehavior::Unconstrained) @@ -1574,10 +1574,10 @@ impl Render for VariableList { ) .with_priority(1) })) - // .vertical_scrollbar_for(self.list_handle.clone(), window, cx) + // .vertical_scrollbar_for(&self.list_handle, window, cx) .custom_scrollbars( ui::Scrollbars::new(ScrollAxes::Both) - .tracked_scroll_handle(self.list_handle.clone()) + .tracked_scroll_handle(&self.list_handle) .with_track_along(ScrollAxes::Both, cx.theme().colors().panel_background) .tracked_entity(cx.entity_id()), window, diff --git a/crates/editor/src/code_context_menus.rs b/crates/editor/src/code_context_menus.rs index 9a2b8c385689f284fc42e49a5c7451b3774fe018..6a07d39210773476b5f88764c5a21f292da48676 
100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -933,7 +933,7 @@ impl CompletionsMenu { ) .occlude() .max_h(max_height_in_lines as f32 * window.line_height()) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .with_sizing_behavior(ListSizingBehavior::Infer) .map(|this| { if self.display_options.dynamic_width { @@ -948,7 +948,7 @@ impl CompletionsMenu { div().child(list).custom_scrollbars( Scrollbars::for_settings::() .show_along(ScrollAxes::Vertical) - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ), @@ -1599,7 +1599,7 @@ impl CodeActionsMenu { ) .occlude() .max_h(max_height_in_lines as f32 * window.line_height()) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .with_width_from_item( self.actions .iter() diff --git a/crates/editor/src/hover_popover.rs b/crates/editor/src/hover_popover.rs index 5f831341bab2a4e37410a1e3e168bcf72bba93a8..0b9a25d3ee0fcb1cb67497bf51fe41ed73a3692e 100644 --- a/crates/editor/src/hover_popover.rs +++ b/crates/editor/src/hover_popover.rs @@ -914,7 +914,7 @@ impl InfoPopover { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ) @@ -1012,7 +1012,7 @@ impl DiagnosticPopover { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ), diff --git a/crates/editor/src/signature_help.rs b/crates/editor/src/signature_help.rs index b394364e01cbd647a0e17afc0ddc13afdb12ced3..2554db2450103709275b3f7946076fd891326d84 100644 --- a/crates/editor/src/signature_help.rs +++ b/crates/editor/src/signature_help.rs @@ -391,7 +391,7 @@ impl SignatureHelpPopover { ) }), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx); + 
.vertical_scrollbar_for(&self.scroll_handle, window, cx); let controls = if self.signatures.len() > 1 { let prev_button = IconButton::new("signature_help_prev", IconName::ChevronUp) diff --git a/crates/extensions_ui/src/extensions_ui.rs b/crates/extensions_ui/src/extensions_ui.rs index e35c90b6104b44bd6dbf3fe86aeaf84f122c04ca..e6d30527e0d7672255bf8f61cfd56fe06b409920 100644 --- a/crates/extensions_ui/src/extensions_ui.rs +++ b/crates/extensions_ui/src/extensions_ui.rs @@ -1704,12 +1704,12 @@ impl Render for ExtensionsPage { if count == 0 { this.child(self.render_empty_state(cx)).into_any_element() } else { - let scroll_handle = self.list.clone(); + let scroll_handle = &self.list; this.child( uniform_list("entries", count, cx.processor(Self::render_extensions)) .flex_grow() .pb_4() - .track_scroll(scroll_handle.clone()), + .track_scroll(scroll_handle), ) .vertical_scrollbar_for(scroll_handle, window, cx) .into_any_element() diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 1f66d194477c64fef207e63d4c87ad4d76675f65..cf6babb401b6f407506595f3dd95592e98c18286 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -3939,7 +3939,7 @@ impl GitPanel { ListHorizontalSizingBehavior::Unconstrained, ) .with_width_from_item(self.max_width_item_index) - .track_scroll(self.scroll_handle.clone()), + .track_scroll(&self.scroll_handle), ) .on_mouse_down( MouseButton::Right, @@ -3949,7 +3949,7 @@ impl GitPanel { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().panel_background, diff --git a/crates/gpui/examples/data_table.rs b/crates/gpui/examples/data_table.rs index 56c9625ed3039b872cf4fcc70e84719ce903e268..dd1a443a9dfaa28a5079a034b8214ce1bbf01da8 100644 --- a/crates/gpui/examples/data_table.rs +++ b/crates/gpui/examples/data_table.rs @@ -438,7 +438,7 @@ impl 
Render for DataTable { }), ) .size_full() - .track_scroll(self.scroll_handle.clone()), + .track_scroll(&self.scroll_handle), ) .child(self.render_scrollbar(window, cx)), ), diff --git a/crates/gpui/src/elements/uniform_list.rs b/crates/gpui/src/elements/uniform_list.rs index 72843ea6330aaa24d9e1d6bf34d024cdeb54ad4a..1e38b0e7ac9abcf891201b7db61b819abe00ef1e 100644 --- a/crates/gpui/src/elements/uniform_list.rs +++ b/crates/gpui/src/elements/uniform_list.rs @@ -668,9 +668,9 @@ impl UniformList { } /// Track and render scroll state of this list with reference to the given scroll handle. - pub fn track_scroll(mut self, handle: UniformListScrollHandle) -> Self { + pub fn track_scroll(mut self, handle: &UniformListScrollHandle) -> Self { self.interactivity.tracked_scroll_handle = Some(handle.0.borrow().base_handle.clone()); - self.scroll_handle = Some(handle); + self.scroll_handle = Some(handle.clone()); self } @@ -780,7 +780,7 @@ mod test { .collect() }), ) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .h(px(200.0)), ) } diff --git a/crates/language_tools/src/syntax_tree_view.rs b/crates/language_tools/src/syntax_tree_view.rs index 885f6bed327c765019ae166e21eab112f884e7dd..3ac007c134657ff33259f961f170d5a7d732a22c 100644 --- a/crates/language_tools/src/syntax_tree_view.rs +++ b/crates/language_tools/src/syntax_tree_view.rs @@ -507,11 +507,11 @@ impl Render for SyntaxTreeView { }), ) .size_full() - .track_scroll(self.list_scroll_handle.clone()) + .track_scroll(&self.list_scroll_handle) .text_bg(cx.theme().colors().background) .into_any_element(), ) - .vertical_scrollbar_for(self.list_scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.list_scroll_handle, window, cx) .into_any_element() } else { let inner_content = v_flex() diff --git a/crates/markdown/src/markdown.rs b/crates/markdown/src/markdown.rs index 1de6d494ffbf445ca8ee3df9d1e83b5575f8224e..dd0d726734173591cb9ed9f8cc965d06aaee7e89 100644 --- 
a/crates/markdown/src/markdown.rs +++ b/crates/markdown/src/markdown.rs @@ -889,7 +889,7 @@ impl Element for MarkdownElement { { let scrollbars = Scrollbars::new(ScrollAxes::Horizontal) .id(("markdown-code-block-scrollbar", range.start)) - .tracked_scroll_handle(scroll_handle.clone()) + .tracked_scroll_handle(scroll_handle) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().editor_background, diff --git a/crates/markdown_preview/src/markdown_preview_view.rs b/crates/markdown_preview/src/markdown_preview_view.rs index c4d3c033df6395235603837bf0944eeb59d3dfbc..4126a31379fa74a750a7d111ac71dc180a3bb0ff 100644 --- a/crates/markdown_preview/src/markdown_preview_view.rs +++ b/crates/markdown_preview/src/markdown_preview_view.rs @@ -611,6 +611,6 @@ impl Render for MarkdownPreviewView { .size_full(), ) })) - .vertical_scrollbar_for(self.list_state.clone(), window, cx) + .vertical_scrollbar_for(&self.list_state, window, cx) } } diff --git a/crates/miniprofiler_ui/src/miniprofiler_ui.rs b/crates/miniprofiler_ui/src/miniprofiler_ui.rs index 93ccfc559c6eedc5e1be1c3ca68355aeba878a76..ea59b43cc1dbc2cb1b8476b00d8fa7d07636afec 100644 --- a/crates/miniprofiler_ui/src/miniprofiler_ui.rs +++ b/crates/miniprofiler_ui/src/miniprofiler_ui.rs @@ -400,10 +400,10 @@ impl Render for ProfilerWindow { this.autoscroll = false; cx.notify(); })) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .size_full(), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) }) } diff --git a/crates/onboarding/src/onboarding.rs b/crates/onboarding/src/onboarding.rs index 404af2c74f9524aa1d52db39de2354bbe4564240..94581e142339cde9d4f1f01a3fb361ae810c1efa 100644 --- a/crates/onboarding/src/onboarding.rs +++ b/crates/onboarding/src/onboarding.rs @@ -350,7 +350,7 @@ impl Render for Onboarding { .child(self.render_page(cx)) .track_scroll(&self.scroll_handle), ) - 
.vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) } } diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index 1e649b2eb64fda970f845e9376be3f61944dde85..6e78b8a1e1f573d9870d42c6a5e99c8574e6979a 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -4639,7 +4639,7 @@ impl OutlinePanel { .with_sizing_behavior(ListSizingBehavior::Infer) .with_horizontal_sizing_behavior(ListHorizontalSizingBehavior::Unconstrained) .with_width_from_item(self.max_width_item_index) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .when(show_indent_guides, |list| { list.with_decoration( ui::indent_guides(px(indent_size), IndentGuideColors::panel(cx)) @@ -4692,7 +4692,7 @@ impl OutlinePanel { .child(list_contents.size_full().flex_shrink()) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle.clone()) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().panel_background, diff --git a/crates/picker/src/picker.rs b/crates/picker/src/picker.rs index 4e7dba59ad39399b9edab30f553bdc17545540dd..8fb4941b716efa8186937ec7b49bcc3cfb26d44b 100644 --- a/crates/picker/src/picker.rs +++ b/crates/picker/src/picker.rs @@ -780,7 +780,7 @@ impl Picker { }) .flex_grow() .py_1() - .track_scroll(scroll_handle.clone()) + .track_scroll(&scroll_handle) .into_any_element(), ElementContainer::List(state) => list( state.clone(), @@ -866,12 +866,12 @@ impl Render for Picker { this.map(|this| match &self.element_container { ElementContainer::List(state) => this.custom_scrollbars( - base_scrollbar_config.tracked_scroll_handle(state.clone()), + base_scrollbar_config.tracked_scroll_handle(state), window, cx, ), ElementContainer::UniformList(state) => this.custom_scrollbars( - 
base_scrollbar_config.tracked_scroll_handle(state.clone()), + base_scrollbar_config.tracked_scroll_handle(state), window, cx, ), diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index cde0b89bb9115476744ed606f16174039db62cf6..e9af8bbe3fff1f5ff7d910b6aa16e05090351777 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -5765,7 +5765,7 @@ impl Render for ProjectPanel { ListHorizontalSizingBehavior::Unconstrained, ) .with_width_from_item(self.state.max_width_item_index) - .track_scroll(self.scroll_handle.clone()), + .track_scroll(&self.scroll_handle), ) .child( div() @@ -5908,7 +5908,7 @@ impl Render for ProjectPanel { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().panel_background, diff --git a/crates/recent_projects/src/remote_servers.rs b/crates/recent_projects/src/remote_servers.rs index 76b0b230dc16b5d5e594379bb94b30ca66b9b317..6dff231b30ddde741f69ba9d4e0366517d8e2751 100644 --- a/crates/recent_projects/src/remote_servers.rs +++ b/crates/recent_projects/src/remote_servers.rs @@ -2160,7 +2160,7 @@ impl RemoteServerProjects { ) .size_full(), ) - .vertical_scrollbar_for(state.scroll_handle, window, cx), + .vertical_scrollbar_for(&state.scroll_handle, window, cx), ), ) .into_any_element() diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 499d6b04653b06c41ef4e302cfd4b4e77efc95c9..2726e1cbd6da7b568d3412791826bc1f7b826397 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -2455,9 +2455,9 @@ impl SettingsWindow { }), ) .size_full() - .track_scroll(self.navbar_scroll_handle.clone()), + .track_scroll(&self.navbar_scroll_handle), ) - .vertical_scrollbar_for(self.navbar_scroll_handle.clone(), window, cx), + 
.vertical_scrollbar_for(&self.navbar_scroll_handle, window, cx), ) .child( h_flex() @@ -3012,10 +3012,10 @@ impl SettingsWindow { window.focus_prev(); })) .when(sub_page_stack().is_empty(), |this| { - this.vertical_scrollbar_for(self.list_state.clone(), window, cx) + this.vertical_scrollbar_for(&self.list_state, window, cx) }) .when(!sub_page_stack().is_empty(), |this| { - this.vertical_scrollbar_for(self.sub_page_scroll_handle.clone(), window, cx) + this.vertical_scrollbar_for(&self.sub_page_scroll_handle, window, cx) }) .track_focus(&self.content_focus_handle.focus_handle(cx)) .pt_6() diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 7b3e29ac9b0582d081a286539d973fe8f1a453c5..64336886a4b430f780db1126b8d677e51cff066b 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1118,7 +1118,7 @@ impl Render for TerminalView { ScrollAxes::Vertical, cx.theme().colors().editor_background, ) - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ) diff --git a/crates/ui/src/components/data_table.rs b/crates/ui/src/components/data_table.rs index a505281cf3fa9868a19a04c168d0b1b5c71a4f85..f7cce2b85ffa3aeb9f97634c6c0fa65c46f4a8e7 100644 --- a/crates/ui/src/components/data_table.rs +++ b/crates/ui/src/components/data_table.rs @@ -872,7 +872,7 @@ impl RenderOnce for Table { interaction_state.as_ref(), |this, state| { this.track_scroll( - state.read_with(cx, |s, _| s.scroll_handle.clone()), + &state.read_with(cx, |s, _| s.scroll_handle.clone()), ) }, ), @@ -906,7 +906,7 @@ impl RenderOnce for Table { .unwrap_or_else(|| Scrollbars::new(super::ScrollAxes::Both)); content .custom_scrollbars( - scrollbars.tracked_scroll_handle(state.read(cx).scroll_handle.clone()), + scrollbars.tracked_scroll_handle(&state.read(cx).scroll_handle), window, cx, ) diff --git a/crates/ui/src/components/scrollbar.rs 
b/crates/ui/src/components/scrollbar.rs index d3d33a296bbd65edb24371d8f5f1e6462e77e3fe..391d480fb313d078bb20ab790ecbb61d7425257a 100644 --- a/crates/ui/src/components/scrollbar.rs +++ b/crates/ui/src/components/scrollbar.rs @@ -150,9 +150,9 @@ pub trait WithScrollbar: Sized { // } #[track_caller] - fn vertical_scrollbar_for( + fn vertical_scrollbar_for( self, - scroll_handle: ScrollHandle, + scroll_handle: &ScrollHandle, window: &mut Window, cx: &mut App, ) -> Self::Output { @@ -441,7 +441,7 @@ impl Scrollbars { pub fn tracked_scroll_handle( self, - tracked_scroll_handle: TrackedHandle, + tracked_scroll_handle: &TrackedHandle, ) -> Scrollbars { let Self { id, @@ -454,7 +454,7 @@ impl Scrollbars { } = self; Scrollbars { - scrollable_handle: Handle::Tracked(tracked_scroll_handle), + scrollable_handle: Handle::Tracked(tracked_scroll_handle.clone()), id, tracked_entity: tracked_entity_id, visibility, @@ -968,7 +968,7 @@ impl ScrollableHandle for ScrollHandle { } } -pub trait ScrollableHandle: 'static + Any + Sized { +pub trait ScrollableHandle: 'static + Any + Sized + Clone { fn max_offset(&self) -> Size; fn set_offset(&self, point: Point); fn offset(&self) -> Point; diff --git a/crates/ui/src/components/tab_bar.rs b/crates/ui/src/components/tab_bar.rs index 3c467c06ce2654c5886c30e42dfb7276fdb7d289..5d41466e3caadf6697b3c1681a405dafa2fb3101 100644 --- a/crates/ui/src/components/tab_bar.rs +++ b/crates/ui/src/components/tab_bar.rs @@ -24,8 +24,8 @@ impl TabBar { } } - pub fn track_scroll(mut self, scroll_handle: ScrollHandle) -> Self { - self.scroll_handle = Some(scroll_handle); + pub fn track_scroll(mut self, scroll_handle: &ScrollHandle) -> Self { + self.scroll_handle = Some(scroll_handle.clone()); self } diff --git a/crates/zed/src/zed/component_preview.rs b/crates/zed/src/zed/component_preview.rs index 18279d8ee88821d44166fb5aedebca2e51ae9491..c231836aaa9219cab2ed913db70ad1704606dfd1 100644 --- a/crates/zed/src/zed/component_preview.rs +++ 
b/crates/zed/src/zed/component_preview.rs @@ -627,7 +627,7 @@ impl Render for ComponentPreview { .collect() }), ) - .track_scroll(self.nav_scroll_handle.clone()) + .track_scroll(&self.nav_scroll_handle) .p_2p5() .w(px(231.)) // Matches perfectly with the size of the "Component Preview" tab, if that's the first one in the pane .h_full() From 6b92c1a47bbfd867c916ad82668caa62ab31aeee Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 26 Nov 2025 18:21:22 +0100 Subject: [PATCH 064/749] workspace: Fix broken main build after #43518 (#43570) *cough* merge queue *cough* Release Notes: - N/A --- crates/workspace/src/notifications.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs index 75c35cda22d72d659040154a079fe78af78cf414..cfdc730b4db5be8e2f4a317dcf7e12072af40a88 100644 --- a/crates/workspace/src/notifications.rs +++ b/crates/workspace/src/notifications.rs @@ -789,7 +789,7 @@ pub mod simple_message_notification { .track_scroll(&self.scroll_handle.clone()) .child((self.build_content)(window, cx)), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) .show_close_button(self.show_close_button) .show_suppress_button(self.show_suppress_button) From 233b97644190a7bb721fd105068bb7f4aafaeeab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Raz=20Guzm=C3=A1n=20Macedo?= Date: Wed, 26 Nov 2025 11:57:27 -0600 Subject: [PATCH 065/749] Add WSL Linux choice and settings.json prompt for GitHub issue template (#43479) Release Notes: - N/A --------- Co-authored-by: Kunall Banerjee --- .github/ISSUE_TEMPLATE/1.bug-report.yml | 29 +++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug-report.yml b/.github/ISSUE_TEMPLATE/1.bug-report.yml index 
543c22117c2aa889b91fddd9eddd905c09dd0644..1fbb81af8e5e6bd8ebc8582c3528f5b88929f041 100644 --- a/.github/ISSUE_TEMPLATE/1.bug-report.yml +++ b/.github/ISSUE_TEMPLATE/1.bug-report.yml @@ -46,6 +46,22 @@ body: validations: required: false + - type: textarea + attributes: + label: If applicable, attach your relevant Zed settings to this issue + description: | + Open the command palette in Zed, then type “zed: open settings file” and copy/paste any relevant (e.g., LSP-specific) settings. + value: | +
settings.json + + + ```json + + ``` + +
+ validations: + required: false - type: textarea attributes: label: If applicable, provide details about your model provider @@ -68,3 +84,16 @@ body: Architecture: aarch64 validations: required: true + - type: dropdown + attributes: + label: If you are using WSL on Windows, what flavor of Linux are you using? + multiple: false + options: + - Arch Linux + - Ubuntu + - Fedora + - Mint + - Pop!_OS + - NixOS + - Other + default: 0 From 61a414df77a49278b6c734b486d7cda419cf781e Mon Sep 17 00:00:00 2001 From: Dino Date: Wed, 26 Nov 2025 19:34:03 +0000 Subject: [PATCH 066/749] Fix language server renaming when parent directory does not exist (#43499) Update the `fs::RenameOptions` used by `project::lsp_store::LocalLspStore.deserialize_workspace_edit` in order to always set `create_parents` to `true`. Doing this ensures that we'll always create the folders for the new file path provided by the language server instead of failing to handle the request in case the parent - Introduce `create_parents` field to `fs::RenameOptions` - Update `fs::RealFs.rename` to ensure that the `create_parents` option is respected Closes #41820 Release Notes: - Fixed a bug where using language server's file renaming actions could fail if the parent directory of the new file did not exist --- .../assistant_text_thread/src/text_thread.rs | 1 + crates/fs/src/fs.rs | 73 +++++++++++++++++++ crates/project/src/agent_server_store.rs | 1 + crates/project/src/lsp_store.rs | 28 ++++--- crates/worktree/src/worktree_tests.rs | 2 + 5 files changed, 94 insertions(+), 11 deletions(-) diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index 613c9b862e8a0b055465a73fe34c541ecb18d4a1..7f24c8f665f8d34aed199562dce1131797f13c9d 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -2933,6 +2933,7 @@ impl TextThread { RenameOptions { overwrite: true, ignore_if_exists: true, + create_parents: 
false, }, ) .await?; diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 5a6e4bdfdba48af25342d4d1ecfafd1d4ce0709b..5be94ab6302b0a950b91e32dc43da374f0c62f29 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -193,6 +193,8 @@ pub struct CopyOptions { pub struct RenameOptions { pub overwrite: bool, pub ignore_if_exists: bool, + /// Whether to create parent directories if they do not exist. + pub create_parents: bool, } #[derive(Copy, Clone, Default)] @@ -579,6 +581,12 @@ impl Fs for RealFs { } } + if options.create_parents { + if let Some(parent) = target.parent() { + self.create_dir(parent).await?; + } + } + smol::fs::rename(source, target).await?; Ok(()) } @@ -2357,6 +2365,12 @@ impl Fs for FakeFs { let old_path = normalize_path(old_path); let new_path = normalize_path(new_path); + if options.create_parents { + if let Some(parent) = new_path.parent() { + self.create_dir(parent).await?; + } + } + let mut state = self.state.lock(); let moved_entry = state.write_path(&old_path, |e| { if let btree_map::Entry::Occupied(e) = e { @@ -3396,4 +3410,63 @@ mod tests { let content = std::fs::read_to_string(&file_to_be_replaced).unwrap(); assert_eq!(content, "Hello"); } + + #[gpui::test] + async fn test_rename(executor: BackgroundExecutor) { + let fs = FakeFs::new(executor.clone()); + fs.insert_tree( + path!("/root"), + json!({ + "src": { + "file_a.txt": "content a", + "file_b.txt": "content b" + } + }), + ) + .await; + + fs.rename( + Path::new(path!("/root/src/file_a.txt")), + Path::new(path!("/root/src/new/renamed_a.txt")), + RenameOptions { + create_parents: true, + ..Default::default() + }, + ) + .await + .unwrap(); + + // Assert that the `file_a.txt` file was being renamed and moved to a + // different directory that did not exist before. 
+ assert_eq!( + fs.files(), + vec![ + PathBuf::from(path!("/root/src/file_b.txt")), + PathBuf::from(path!("/root/src/new/renamed_a.txt")), + ] + ); + + let result = fs + .rename( + Path::new(path!("/root/src/file_b.txt")), + Path::new(path!("/root/src/old/renamed_b.txt")), + RenameOptions { + create_parents: false, + ..Default::default() + }, + ) + .await; + + // Assert that the `file_b.txt` file was not renamed nor moved, as + // `create_parents` was set to `false`. + // The destination's parent directory was therefore never created. + assert!(result.is_err()); + assert_eq!( + fs.files(), + vec![ + PathBuf::from(path!("/root/src/file_b.txt")), + PathBuf::from(path!("/root/src/new/renamed_a.txt")), + ] + ); + } } diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index d6bd83531eda515e6c2841c65d51619da82e9ae4..ef12e222009a59430a3396cae7971ac7593e82c3 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -1089,6 +1089,7 @@ async fn download_latest_version( RenameOptions { ignore_if_exists: true, overwrite: true, + create_parents: false, }, ) .await?; diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 4f7022a264db18f96150c369fadb957556e33b75..a69d2553692277c7e10b203bf9edf075553d546c 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -3021,17 +3021,23 @@ impl LocalLspStore { .new_uri .to_file_path() .map_err(|()| anyhow!("can't convert URI to path"))?; - fs.rename( - &source_abs_path, - &target_abs_path, - op.options - .map(|options| fs::RenameOptions { - overwrite: options.overwrite.unwrap_or(false), - ignore_if_exists: options.ignore_if_exists.unwrap_or(false), - }) - .unwrap_or_default(), - ) - .await?; + + let options = fs::RenameOptions { + overwrite: op + .options + .as_ref() + .and_then(|options| options.overwrite) + .unwrap_or(false), + ignore_if_exists: op + .options + .as_ref() + .and_then(|options|
options.ignore_if_exists) + .unwrap_or(false), + create_parents: true, + }; + + fs.rename(&source_abs_path, &target_abs_path, options) + .await?; } lsp::DocumentChangeOperation::Op(lsp::ResourceOp::Delete(op)) => { diff --git a/crates/worktree/src/worktree_tests.rs b/crates/worktree/src/worktree_tests.rs index e8d98b3508bd14f7ea8baaf1b985b42293eb078d..50e2c6acae0013a75e346ba754f9c9f861196b58 100644 --- a/crates/worktree/src/worktree_tests.rs +++ b/crates/worktree/src/worktree_tests.rs @@ -379,6 +379,7 @@ async fn test_renaming_case_only(cx: &mut TestAppContext) { fs::RenameOptions { overwrite: true, ignore_if_exists: true, + create_parents: false, }, ) .await @@ -1986,6 +1987,7 @@ async fn randomly_mutate_fs( fs::RenameOptions { overwrite: true, ignore_if_exists: true, + create_parents: false, }, ) .await From f89e5308e38c2c8e62f07dc0d641e61079569767 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 26 Nov 2025 17:15:05 -0300 Subject: [PATCH 067/749] edit prediction: Report early-rejected predictions and fix cancel bug (#43585) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Many prediction requests end up being rejected early without ever being set as the current prediction. Before this change, those cases weren’t reported as rejections because the `request_prediction_with_*` functions simply returned `Ok(None)`. With this update, whenever we get a successful response from the provider, we will return at least the `id`, allowing it to be properly reported. The request now also includes a “reject reason,” since the different variants carry distinct implications for prediction quality. All of these scenarios are now covered by tests. While adding them, I also found and fixed a bug where some cancelled predictions were incorrectly being set as the current one. 
Release Notes: - N/A --------- Co-authored-by: MrSubidubi --- .../cloud_llm_client/src/cloud_llm_client.rs | 21 +- crates/zeta/src/prediction.rs | 89 +- crates/zeta/src/provider.rs | 13 +- crates/zeta/src/sweep_ai.rs | 10 +- crates/zeta/src/zeta.rs | 868 +++++++++++++++--- crates/zeta/src/zeta1.rs | 13 +- crates/zeta/src/zeta_tests.rs | 2 +- crates/zeta_cli/src/predict.rs | 5 +- 8 files changed, 842 insertions(+), 179 deletions(-) diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs index 241e760887cdf0c4455f6769c79a813de0626028..15b5a4eda4f8473f48cc66d255598cc6c1d09f08 100644 --- a/crates/cloud_llm_client/src/cloud_llm_client.rs +++ b/crates/cloud_llm_client/src/cloud_llm_client.rs @@ -200,12 +200,31 @@ pub struct RejectEditPredictionsBody { pub rejections: Vec, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct EditPredictionRejection { pub request_id: String, + #[serde(default)] + pub reason: EditPredictionRejectReason, pub was_shown: bool, } +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] +pub enum EditPredictionRejectReason { + /// New requests were triggered before this one completed + Canceled, + /// No edits returned + Empty, + /// Edits returned, but none remained after interpolation + InterpolatedEmpty, + /// The new prediction was preferred over the current one + Replaced, + /// The current prediction was preferred over the new one + CurrentPreferred, + /// The current prediction was discarded + #[default] + Discarded, +} + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum CompletionMode { diff --git a/crates/zeta/src/prediction.rs b/crates/zeta/src/prediction.rs index 0125e739f335fc133cbff84dcd8b4c4bac3e6e7b..fd3241730030fe8bdd95e2cae9ee87b406ade735 100644 --- a/crates/zeta/src/prediction.rs +++ 
b/crates/zeta/src/prediction.rs @@ -5,6 +5,7 @@ use std::{ time::{Duration, Instant}, }; +use cloud_llm_client::EditPredictionRejectReason; use gpui::{AsyncApp, Entity, SharedString}; use language::{Anchor, Buffer, BufferSnapshot, EditPreview, OffsetRangeExt, TextBufferSnapshot}; use serde::Serialize; @@ -24,28 +25,13 @@ impl std::fmt::Display for EditPredictionId { } } -#[derive(Clone)] -pub struct EditPrediction { +/// A prediction response that was returned from the provider, whether it was ultimately valid or not. +pub struct EditPredictionResult { pub id: EditPredictionId, - pub edits: Arc<[(Range, Arc)]>, - pub snapshot: BufferSnapshot, - pub edit_preview: EditPreview, - // We keep a reference to the buffer so that we do not need to reload it from disk when applying the prediction. - pub buffer: Entity, - pub buffer_snapshotted_at: Instant, - pub response_received_at: Instant, - pub inputs: EditPredictionInputs, -} - -#[derive(Debug, Clone, Serialize)] -pub struct EditPredictionInputs { - pub events: Vec>, - pub included_files: Vec, - pub cursor_point: cloud_llm_client::predict_edits_v3::Point, - pub cursor_path: Arc, + pub prediction: Result, } -impl EditPrediction { +impl EditPredictionResult { pub async fn new( id: EditPredictionId, edited_buffer: &Entity, @@ -55,8 +41,15 @@ impl EditPrediction { response_received_at: Instant, inputs: EditPredictionInputs, cx: &mut AsyncApp, - ) -> Option { - let (edits, snapshot, edit_preview_task) = edited_buffer + ) -> Self { + if edits.is_empty() { + return Self { + id, + prediction: Err(EditPredictionRejectReason::Empty), + }; + } + + let Some((edits, snapshot, edit_preview_task)) = edited_buffer .read_with(cx, |buffer, cx| { let new_snapshot = buffer.snapshot(); let edits: Arc<[_]> = @@ -64,22 +57,54 @@ impl EditPrediction { Some((edits.clone(), new_snapshot, buffer.preview_edits(edits, cx))) }) - .ok()??; + .ok() + .flatten() + else { + return Self { + id, + prediction: 
Err(EditPredictionRejectReason::InterpolatedEmpty), + }; + }; let edit_preview = edit_preview_task.await; - Some(EditPrediction { - id, - edits, - snapshot, - edit_preview, - inputs, - buffer: edited_buffer.clone(), - buffer_snapshotted_at, - response_received_at, - }) + Self { + id: id.clone(), + prediction: Ok(EditPrediction { + id, + edits, + snapshot, + edit_preview, + inputs, + buffer: edited_buffer.clone(), + buffer_snapshotted_at, + response_received_at, + }), + } } +} +#[derive(Clone)] +pub struct EditPrediction { + pub id: EditPredictionId, + pub edits: Arc<[(Range, Arc)]>, + pub snapshot: BufferSnapshot, + pub edit_preview: EditPreview, + pub buffer: Entity, + pub buffer_snapshotted_at: Instant, + pub response_received_at: Instant, + pub inputs: EditPredictionInputs, +} + +#[derive(Debug, Clone, Serialize)] +pub struct EditPredictionInputs { + pub events: Vec>, + pub included_files: Vec, + pub cursor_point: cloud_llm_client::predict_edits_v3::Point, + pub cursor_path: Arc, +} + +impl EditPrediction { pub fn interpolate( &self, new_snapshot: &TextBufferSnapshot, diff --git a/crates/zeta/src/provider.rs b/crates/zeta/src/provider.rs index 76c950714afa808ea04cf5fead89979374f2b99b..b91df0963386543fbd1e8645e5893a35fe202cc5 100644 --- a/crates/zeta/src/provider.rs +++ b/crates/zeta/src/provider.rs @@ -1,6 +1,7 @@ use std::{cmp, sync::Arc, time::Duration}; use client::{Client, UserStore}; +use cloud_llm_client::EditPredictionRejectReason; use edit_prediction::{DataCollectionState, Direction, EditPredictionProvider}; use gpui::{App, Entity, prelude::*}; use language::ToPoint as _; @@ -132,7 +133,11 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { fn discard(&mut self, cx: &mut Context) { self.zeta.update(cx, |zeta, cx| { - zeta.discard_current_prediction(&self.project, cx); + zeta.reject_current_prediction( + EditPredictionRejectReason::Discarded, + &self.project, + cx, + ); }); } @@ -169,7 +174,11 @@ impl EditPredictionProvider for 
ZetaEditPredictionProvider { let Some(edits) = prediction.interpolate(&snapshot) else { self.zeta.update(cx, |zeta, cx| { - zeta.discard_current_prediction(&self.project, cx); + zeta.reject_current_prediction( + EditPredictionRejectReason::InterpolatedEmpty, + &self.project, + cx, + ); }); return None; }; diff --git a/crates/zeta/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs index c88dda2ae2fd11dd37965e58560df9e98528c9d9..f40e9711f231523174a2d2edbd9fe1adb14ad498 100644 --- a/crates/zeta/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -18,7 +18,7 @@ use std::{ time::Instant, }; -use crate::{EditPrediction, EditPredictionId, EditPredictionInputs}; +use crate::{EditPredictionId, EditPredictionInputs, prediction::EditPredictionResult}; const SWEEP_API_URL: &str = "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; @@ -45,7 +45,7 @@ impl SweepAi { recent_paths: &VecDeque, diagnostic_search_range: Range, cx: &mut App, - ) -> Task>> { + ) -> Task>> { let debug_info = self.debug_info.clone(); let Some(api_token) = self.api_token.clone() else { return Task::ready(Ok(None)); @@ -242,8 +242,8 @@ impl SweepAi { cx.spawn(async move |cx| { let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; - anyhow::Ok( - EditPrediction::new( + anyhow::Ok(Some( + EditPredictionResult::new( EditPredictionId(id.into()), &buffer, &old_snapshot, @@ -254,7 +254,7 @@ impl SweepAi { cx, ) .await, - ) + )) }) } } diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 26a2388a96e4a828fc4c7bd6fe5d3dbb57bfc911..5cf0191e2f8180ea7bcfbef07c046372d2ee22c9 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -3,9 +3,9 @@ use arrayvec::ArrayVec; use client::{Client, EditPredictionUsage, UserStore}; use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature}; use cloud_llm_client::{ - AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejection, - MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, 
MINIMUM_REQUIRED_VERSION_HEADER_NAME, - RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, + AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejectReason, + EditPredictionRejection, MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, + MINIMUM_REQUIRED_VERSION_HEADER_NAME, RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, }; use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; @@ -74,6 +74,7 @@ use crate::onboarding_modal::ZedPredictModal; pub use crate::prediction::EditPrediction; pub use crate::prediction::EditPredictionId; pub use crate::prediction::EditPredictionInputs; +use crate::prediction::EditPredictionResult; use crate::rate_prediction_modal::{ NextEdit, PreviousEdit, RatePredictionsModal, ThumbsDownActivePrediction, ThumbsUpActivePrediction, @@ -310,6 +311,31 @@ impl ZetaProject { ) .collect() } + + fn cancel_pending_prediction( + &mut self, + pending_prediction: PendingPrediction, + cx: &mut Context, + ) { + self.cancelled_predictions.insert(pending_prediction.id); + + cx.spawn(async move |this, cx| { + let Some(prediction_id) = pending_prediction.task.await else { + return; + }; + + this.update(cx, |this, cx| { + this.reject_prediction( + prediction_id, + EditPredictionRejectReason::Canceled, + false, + cx, + ); + }) + .ok(); + }) + .detach() + } } #[derive(Debug, Clone)] @@ -373,6 +399,7 @@ impl PredictionRequestedBy { } } +#[derive(Debug)] struct PendingPrediction { id: usize, task: Task>, @@ -385,6 +412,18 @@ enum BufferEditPrediction<'a> { Jump { prediction: &'a EditPrediction }, } +#[cfg(test)] +impl std::ops::Deref for BufferEditPrediction<'_> { + type Target = EditPrediction; + + fn deref(&self) -> &Self::Target { + match self { + BufferEditPrediction::Local { prediction } => prediction, + BufferEditPrediction::Jump { prediction } => prediction, + } + } +} + struct RegisteredBuffer { snapshot: BufferSnapshot, _subscriptions: 
[gpui::Subscription; 2], @@ -467,7 +506,7 @@ impl Zeta { let (reject_tx, mut reject_rx) = mpsc::unbounded(); cx.spawn(async move |this, cx| { while let Some(()) = reject_rx.next().await { - this.update(cx, |this, cx| this.reject_edit_predictions(cx))? + this.update(cx, |this, cx| this.flush_rejected_predictions(cx))? .await .log_err(); } @@ -818,7 +857,7 @@ impl Zeta { }; let request_id = prediction.prediction.id.to_string(); for pending_prediction in mem::take(&mut project_state.pending_predictions) { - self.cancel_pending_prediction(pending_prediction, cx); + project_state.cancel_pending_prediction(pending_prediction, cx); } let client = self.client.clone(); @@ -856,7 +895,7 @@ impl Zeta { .detach_and_log_err(cx); } - fn reject_edit_predictions(&mut self, cx: &mut Context) -> Task> { + fn flush_rejected_predictions(&mut self, cx: &mut Context) -> Task> { match self.edit_prediction_model { ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {} ZetaEditPredictionModel::Sweep => return Task::ready(anyhow::Ok(())), @@ -904,11 +943,16 @@ impl Zeta { }) } - fn discard_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + fn reject_current_prediction( + &mut self, + reason: EditPredictionRejectReason, + project: &Entity, + cx: &mut Context, + ) { if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { project_state.pending_predictions.clear(); if let Some(prediction) = project_state.current_prediction.take() { - self.discard_prediction(prediction.prediction.id, prediction.was_shown, cx); + self.reject_prediction(prediction.prediction.id, reason, prediction.was_shown, cx); } }; } @@ -929,14 +973,16 @@ impl Zeta { } } - fn discard_prediction( + fn reject_prediction( &mut self, prediction_id: EditPredictionId, + reason: EditPredictionRejectReason, was_shown: bool, cx: &mut Context, ) { self.rejected_predictions.push(EditPredictionRejection { request_id: prediction_id.to_string(), + reason, was_shown, }); @@ -944,34 +990,16 
@@ impl Zeta { self.rejected_predictions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; let reject_tx = self.reject_predictions_tx.clone(); self.reject_predictions_debounce_task = Some(cx.spawn(async move |_this, cx| { - const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); + const REJECT_REQUEST_DEBOUNCE: Duration = Duration::from_secs(15); if !reached_request_limit { cx.background_executor() - .timer(DISCARD_COMPLETIONS_DEBOUNCE) + .timer(REJECT_REQUEST_DEBOUNCE) .await; } reject_tx.unbounded_send(()).log_err(); })); } - fn cancel_pending_prediction( - &self, - pending_prediction: PendingPrediction, - cx: &mut Context, - ) { - cx.spawn(async move |this, cx| { - let Some(prediction_id) = pending_prediction.task.await else { - return; - }; - - this.update(cx, |this, cx| { - this.discard_prediction(prediction_id, false, cx); - }) - .ok(); - }) - .detach() - } - fn is_refreshing(&self, project: &Entity) -> bool { self.projects .get(&project.entity_id()) @@ -995,38 +1023,15 @@ impl Zeta { return Task::ready(anyhow::Ok(None)); }; - let project = project.clone(); - cx.spawn(async move |cx| { - if let Some(prediction) = request_task.await? 
{ - let id = prediction.id.clone(); - this.update(cx, |this, cx| { - let project_state = this - .projects - .get_mut(&project.entity_id()) - .context("Project not found")?; - - let new_prediction = CurrentEditPrediction { - requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), - prediction: prediction, - was_shown: false, - }; - - if project_state - .current_prediction - .as_ref() - .is_none_or(|old_prediction| { - new_prediction.should_replace_prediction(&old_prediction, cx) - }) - { - project_state.current_prediction = Some(new_prediction); - cx.notify(); - } - anyhow::Ok(()) - })??; - Ok(Some(id)) - } else { - Ok(None) - } + cx.spawn(async move |_cx| { + request_task.await.map(|prediction_result| { + prediction_result.map(|prediction_result| { + ( + prediction_result, + PredictionRequestedBy::Buffer(buffer.entity_id()), + ) + }) + }) }) }) } @@ -1076,7 +1081,7 @@ impl Zeta { return anyhow::Ok(None); }; - let Some(prediction) = this + let Some(prediction_result) = this .update(cx, |this, cx| { this.request_prediction(&project, &jump_buffer, jump_position, cx) })? 
@@ -1085,21 +1090,23 @@ impl Zeta { return anyhow::Ok(None); }; - let id = prediction.id.clone(); this.update(cx, |this, cx| { - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - zeta_project.current_prediction.get_or_insert_with(|| { - cx.notify(); - CurrentEditPrediction { - requested_by: PredictionRequestedBy::DiagnosticsUpdate, - prediction, - was_shown: false, + Some(( + if this + .get_or_init_zeta_project(&project, cx) + .current_prediction + .is_none() + { + prediction_result + } else { + EditPredictionResult { + id: prediction_result.id, + prediction: Err(EditPredictionRejectReason::CurrentPreferred), } - }); - } - })?; - - anyhow::Ok(Some(id)) + }, + PredictionRequestedBy::DiagnosticsUpdate, + )) + }) }) }); } @@ -1117,7 +1124,8 @@ impl Zeta { do_refresh: impl FnOnce( WeakEntity, &mut AsyncApp, - ) -> Task>> + ) + -> Task>> + 'static, ) { let zeta_project = self.get_or_init_zeta_project(&project, cx); @@ -1152,22 +1160,77 @@ impl Zeta { return None; } - let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); + let new_prediction_result = do_refresh(this.clone(), cx).await.log_err().flatten(); + let new_prediction_id = new_prediction_result + .as_ref() + .map(|(prediction, _)| prediction.id.clone()); // When a prediction completes, remove it from the pending list, and cancel // any pending predictions that were enqueued before it. 
this.update(cx, |this, cx| { let zeta_project = this.get_or_init_zeta_project(&project, cx); - zeta_project + + let is_cancelled = zeta_project .cancelled_predictions .remove(&pending_prediction_id); + let new_current_prediction = if !is_cancelled + && let Some((prediction_result, requested_by)) = new_prediction_result + { + match prediction_result.prediction { + Ok(prediction) => { + let new_prediction = CurrentEditPrediction { + requested_by, + prediction, + was_shown: false, + }; + + if let Some(current_prediction) = + zeta_project.current_prediction.as_ref() + { + if new_prediction.should_replace_prediction(¤t_prediction, cx) + { + this.reject_current_prediction( + EditPredictionRejectReason::Replaced, + &project, + cx, + ); + + Some(new_prediction) + } else { + this.reject_prediction( + new_prediction.prediction.id, + EditPredictionRejectReason::CurrentPreferred, + false, + cx, + ); + None + } + } else { + Some(new_prediction) + } + } + Err(reject_reason) => { + this.reject_prediction(prediction_result.id, reject_reason, false, cx); + None + } + } + } else { + None + }; + + let zeta_project = this.get_or_init_zeta_project(&project, cx); + + if let Some(new_prediction) = new_current_prediction { + zeta_project.current_prediction = Some(new_prediction); + } + let mut pending_predictions = mem::take(&mut zeta_project.pending_predictions); for (ix, pending_prediction) in pending_predictions.iter().enumerate() { if pending_prediction.id == pending_prediction_id { pending_predictions.remove(ix); for pending_prediction in pending_predictions.drain(0..ix) { - this.cancel_pending_prediction(pending_prediction, cx) + zeta_project.cancel_pending_prediction(pending_prediction, cx) } break; } @@ -1178,7 +1241,7 @@ impl Zeta { }) .ok(); - edit_prediction_id + new_prediction_id }); if zeta_project.pending_predictions.len() <= 1 { @@ -1192,10 +1255,7 @@ impl Zeta { id: pending_prediction_id, task, }); - zeta_project - .cancelled_predictions - .insert(pending_prediction.id); - 
self.cancel_pending_prediction(pending_prediction, cx); + zeta_project.cancel_pending_prediction(pending_prediction, cx); } } @@ -1205,7 +1265,7 @@ impl Zeta { active_buffer: &Entity, position: language::Anchor, cx: &mut Context, - ) -> Task>> { + ) -> Task>> { self.request_prediction_internal( project.clone(), active_buffer.clone(), @@ -1222,7 +1282,7 @@ impl Zeta { position: language::Anchor, allow_jump: bool, cx: &mut Context, - ) -> Task>> { + ) -> Task>> { const DIAGNOSTIC_LINES_RANGE: u32 = 20; self.get_or_init_zeta_project(&project, cx); @@ -1268,9 +1328,7 @@ impl Zeta { }; cx.spawn(async move |this, cx| { - let prediction = task - .await? - .filter(|prediction| !prediction.edits.is_empty()); + let prediction = task.await?; if prediction.is_none() && allow_jump { let cursor_point = position.to_point(&snapshot); @@ -1392,7 +1450,7 @@ impl Zeta { position: language::Anchor, events: Vec>, cx: &mut Context, - ) -> Task>> { + ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); let index_state = project_state.and_then(|state| { @@ -1689,7 +1747,7 @@ impl Zeta { let (res, usage) = response?; let request_id = EditPredictionId(res.id.clone().into()); let Some(mut output_text) = text_from_response(res) else { - return Ok((None, usage)); + return Ok((Some((request_id, None)), usage)); }; if output_text.contains(CURSOR_MARKER) { @@ -1747,11 +1805,13 @@ impl Zeta { anyhow::Ok(( Some(( request_id, - inputs, - edited_buffer, - edited_buffer_snapshot.clone(), - edits, - received_response_at, + Some(( + inputs, + edited_buffer, + edited_buffer_snapshot.clone(), + edits, + received_response_at, + )), )), usage, )) @@ -1760,30 +1820,40 @@ impl Zeta { cx.spawn({ async move |this, cx| { + let Some((id, prediction)) = + Self::handle_api_response(&this, request_task.await, cx)? 
+ else { + return Ok(None); + }; + let Some(( - id, inputs, edited_buffer, edited_buffer_snapshot, edits, received_response_at, - )) = Self::handle_api_response(&this, request_task.await, cx)? + )) = prediction else { - return Ok(None); + return Ok(Some(EditPredictionResult { + id, + prediction: Err(EditPredictionRejectReason::Empty), + })); }; // TODO telemetry: duration, etc - Ok(EditPrediction::new( - id, - &edited_buffer, - &edited_buffer_snapshot, - edits.into(), - buffer_snapshotted_at, - received_response_at, - inputs, - cx, - ) - .await) + Ok(Some( + EditPredictionResult::new( + id, + &edited_buffer, + &edited_buffer_snapshot, + edits.into(), + buffer_snapshotted_at, + received_response_at, + inputs, + cx, + ) + .await, + )) } }) } @@ -2806,6 +2876,9 @@ mod tests { use client::UserStore; use clock::FakeSystemClock; + use cloud_llm_client::{ + EditPredictionRejectReason, EditPredictionRejection, RejectEditPredictionsBody, + }; use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; use futures::{ AsyncReadExt, StreamExt, @@ -2830,7 +2903,7 @@ mod tests { #[gpui::test] async fn test_current_state(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); + let (zeta, mut requests) = init_test(cx); let fs = FakeFs::new(cx.executor()); fs.insert_tree( "/root", @@ -2861,7 +2934,7 @@ mod tests { zeta.update(cx, |zeta, cx| { zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); + let (_request, respond_tx) = requests.predict.next().await.unwrap(); respond_tx .send(model_response(indoc! 
{r" @@ -2888,7 +2961,7 @@ mod tests { let refresh_task = zeta.update(cx, |zeta, cx| { zeta.refresh_context(project.clone(), buffer1.clone(), position, cx) }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); + let (_request, respond_tx) = requests.predict.next().await.unwrap(); respond_tx .send(open_ai::Response { id: Uuid::new_v4().to_string(), @@ -2929,14 +3002,14 @@ mod tests { refresh_task.await.unwrap(); zeta.update(cx, |zeta, cx| { - zeta.discard_current_prediction(&project, cx); + zeta.reject_current_prediction(EditPredictionRejectReason::Discarded, &project, cx); }); // Prediction for another file zeta.update(cx, |zeta, cx| { zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); + let (_request, respond_tx) = requests.predict.next().await.unwrap(); respond_tx .send(model_response(indoc! {r#" --- a/root/2.txt @@ -2977,7 +3050,7 @@ mod tests { #[gpui::test] async fn test_simple_request(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); + let (zeta, mut requests) = init_test(cx); let fs = FakeFs::new(cx.executor()); fs.insert_tree( "/root", @@ -3002,7 +3075,7 @@ mod tests { zeta.request_prediction(&project, &buffer, position, cx) }); - let (_, respond_tx) = req_rx.next().await.unwrap(); + let (_, respond_tx) = requests.predict.next().await.unwrap(); // TODO Put back when we have a structured request again // assert_eq!( @@ -3029,7 +3102,7 @@ mod tests { "})) .unwrap(); - let prediction = prediction_task.await.unwrap().unwrap(); + let prediction = prediction_task.await.unwrap().unwrap().prediction.unwrap(); assert_eq!(prediction.edits.len(), 1); assert_eq!( @@ -3041,7 +3114,7 @@ mod tests { #[gpui::test] async fn test_request_events(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); + let (zeta, mut requests) = init_test(cx); let fs = FakeFs::new(cx.executor()); fs.insert_tree( "/root", @@ -3075,7 +3148,7 @@ mod tests { 
zeta.request_prediction(&project, &buffer, position, cx) }); - let (request, respond_tx) = req_rx.next().await.unwrap(); + let (request, respond_tx) = requests.predict.next().await.unwrap(); let prompt = prompt_from_request(&request); assert!( @@ -3103,7 +3176,7 @@ mod tests { "#})) .unwrap(); - let prediction = prediction_task.await.unwrap().unwrap(); + let prediction = prediction_task.await.unwrap().unwrap().prediction.unwrap(); assert_eq!(prediction.edits.len(), 1); assert_eq!( @@ -3113,6 +3186,522 @@ mod tests { assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); } + #[gpui::test] + async fn test_empty_prediction(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + const NO_OP_DIFF: &str = indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! 
+ -How + +How + Bye + "}; + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let response = model_response(NO_OP_DIFF); + let id = response.id.clone(); + respond_tx.send(response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .is_none() + ); + }); + + // prediction is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: id, + reason: EditPredictionRejectReason::Empty, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_interpolated_empty(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + + buffer.update(cx, |buffer, cx| { + buffer.set_text("Hello!\nHow are you?\nBye", cx); + }); + + let response = model_response(SIMPLE_DIFF); + let id = response.id.clone(); + respond_tx.send(response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .is_none() + ); + }); + + // prediction is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + 
&reject_request.rejections, + &[EditPredictionRejection { + request_id: id, + reason: EditPredictionRejectReason::InterpolatedEmpty, + was_shown: false + }] + ); + } + + const SIMPLE_DIFF: &str = indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are you? + Bye + "}; + + #[gpui::test] + async fn test_replace_current(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_tx.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + // a second request is triggered + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let second_response = model_response(SIMPLE_DIFF); + let second_id = second_response.id.clone(); + respond_tx.send(second_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // second replaces first + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, 
&project, cx) + .unwrap() + .id + .0, + second_id + ); + }); + + // first is reported as replaced + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: first_id, + reason: EditPredictionRejectReason::Replaced, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_current_preferred(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_tx.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + // a second request is triggered + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + // worse than current prediction + let second_response = model_response(indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! 
+ -How + +How are + Bye + "}); + let second_id = second_response.id.clone(); + respond_tx.send(second_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // first is preferred over second + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + // second is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: second_id, + reason: EditPredictionRejectReason::CurrentPreferred, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_cancel_earlier_pending_requests(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + // start two refresh tasks + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_first) = requests.predict.next().await.unwrap(); + let (_, respond_second) = requests.predict.next().await.unwrap(); + + // wait for throttle + cx.run_until_parked(); + + // second responds first + let second_response = model_response(SIMPLE_DIFF); + let second_id = second_response.id.clone(); + respond_second.send(second_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current 
prediction is second + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + second_id + ); + }); + + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_first.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is still second, since first was cancelled + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + second_id + ); + }); + + // first is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + cx.run_until_parked(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: first_id, + reason: EditPredictionRejectReason::Canceled, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_cancel_second_on_third_request(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + // start two refresh tasks + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + // wait for throttle, so requests are sent + cx.run_until_parked(); + + let (_, respond_first) = requests.predict.next().await.unwrap(); + let (_, respond_second) = requests.predict.next().await.unwrap(); + + zeta.update(cx, |zeta, 
cx| { + // start a third request + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + + // 2 are pending, so 2nd is cancelled + assert_eq!( + zeta.get_or_init_zeta_project(&project, cx) + .cancelled_predictions + .iter() + .copied() + .collect::>(), + [1] + ); + }); + + // wait for throttle + cx.run_until_parked(); + + let (_, respond_third) = requests.predict.next().await.unwrap(); + + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_first.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is first + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + let cancelled_response = model_response(SIMPLE_DIFF); + let cancelled_id = cancelled_response.id.clone(); + respond_second.send(cancelled_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is still first, since second was cancelled + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + let third_response = model_response(SIMPLE_DIFF); + let third_response_id = third_response.id.clone(); + respond_third.send(third_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // third completes and replaces first + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + third_response_id + ); + }); + + // second is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + cx.run_until_parked(); + + assert_eq!( + &reject_request.rejections, + &[ + EditPredictionRejection { + request_id: cancelled_id, + reason: EditPredictionRejectReason::Canceled, + was_shown: false + }, + EditPredictionRejection { + request_id: first_id, + reason: EditPredictionRejectReason::Replaced, + 
was_shown: false + } + ] + ); + } + // Skipped until we start including diagnostics in prompt // #[gpui::test] // async fn test_request_diagnostics(cx: &mut TestAppContext) { @@ -3242,24 +3831,26 @@ mod tests { content } - fn init_test( - cx: &mut TestAppContext, - ) -> ( - Entity, - mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, - ) { + struct RequestChannels { + predict: mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, + reject: mpsc::UnboundedReceiver<(RejectEditPredictionsBody, oneshot::Sender<()>)>, + } + + fn init_test(cx: &mut TestAppContext) -> (Entity, RequestChannels) { cx.update(move |cx| { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); zlog::init_test(); - let (req_tx, req_rx) = mpsc::unbounded(); + let (predict_req_tx, predict_req_rx) = mpsc::unbounded(); + let (reject_req_tx, reject_req_rx) = mpsc::unbounded(); let http_client = FakeHttpClient::create({ move |req| { let uri = req.uri().path().to_string(); let mut body = req.into_body(); - let req_tx = req_tx.clone(); + let predict_req_tx = predict_req_tx.clone(); + let reject_req_tx = reject_req_tx.clone(); async move { let resp = match uri.as_str() { "/client/llm_tokens" => serde_json::to_string(&json!({ @@ -3272,7 +3863,16 @@ mod tests { let req = serde_json::from_slice(&buf).unwrap(); let (res_tx, res_rx) = oneshot::channel(); - req_tx.unbounded_send((req, res_tx)).unwrap(); + predict_req_tx.unbounded_send((req, res_tx)).unwrap(); + serde_json::to_string(&res_rx.await?).unwrap() + } + "/predict_edits/reject" => { + let mut buf = Vec::new(); + body.read_to_end(&mut buf).await.ok(); + let req = serde_json::from_slice(&buf).unwrap(); + + let (res_tx, res_rx) = oneshot::channel(); + reject_req_tx.unbounded_send((req, res_tx)).unwrap(); serde_json::to_string(&res_rx.await?).unwrap() } _ => { @@ -3293,7 +3893,13 @@ mod tests { let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); let zeta = Zeta::global(&client, &user_store, cx); - 
(zeta, req_rx) + ( + zeta, + RequestChannels { + predict: predict_req_rx, + reject: reject_req_rx, + }, + ) }) } } diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs index 7f80d60d5efcbbd0bd7b9426508c344c063d5597..96d175d5eb11c2c8be40779cf77bfb743d39dff6 100644 --- a/crates/zeta/src/zeta1.rs +++ b/crates/zeta/src/zeta1.rs @@ -4,7 +4,7 @@ use std::{fmt::Write, ops::Range, path::Path, sync::Arc, time::Instant}; use crate::{ EditPredictionId, ZedUpdateRequiredError, Zeta, - prediction::{EditPrediction, EditPredictionInputs}, + prediction::{EditPredictionInputs, EditPredictionResult}, }; use anyhow::{Context as _, Result}; use cloud_llm_client::{ @@ -36,7 +36,7 @@ pub(crate) fn request_prediction_with_zeta1( position: language::Anchor, events: Vec>, cx: &mut Context, -) -> Task>> { +) -> Task>> { let buffer = buffer.clone(); let buffer_snapshotted_at = Instant::now(); let client = zeta.client.clone(); @@ -216,7 +216,7 @@ pub(crate) fn request_prediction_with_zeta1( ); } - edit_prediction + edit_prediction.map(Some) }) } @@ -229,7 +229,7 @@ fn process_completion_response( buffer_snapshotted_at: Instant, received_response_at: Instant, cx: &AsyncApp, -) -> Task>> { +) -> Task> { let snapshot = snapshot.clone(); let request_id = prediction_response.request_id; let output_excerpt = prediction_response.output_excerpt; @@ -246,8 +246,9 @@ fn process_completion_response( .await? 
.into(); - Ok(EditPrediction::new( - EditPredictionId(request_id.into()), + let id = EditPredictionId(request_id.into()); + Ok(EditPredictionResult::new( + id, &buffer, &snapshot, edits, diff --git a/crates/zeta/src/zeta_tests.rs b/crates/zeta/src/zeta_tests.rs index eb12f81af25d72b5e7003187ab0a9536622c9a74..9b7abb216f5e8e7a9c8bd14a33c2f6ecd9f16174 100644 --- a/crates/zeta/src/zeta_tests.rs +++ b/crates/zeta/src/zeta_tests.rs @@ -538,7 +538,7 @@ async fn run_edit_prediction( let prediction_task = zeta.update(cx, |zeta, cx| { zeta.request_prediction(&project, buffer, cursor, cx) }); - prediction_task.await.unwrap().unwrap() + prediction_task.await.unwrap().unwrap().prediction.unwrap() } async fn make_test_zeta( diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 8a1a4131fb684a5186b2111f9d922fa34d6972e1..c2d68a471fa5de7765c1042473fc8118a3fc9415 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -235,7 +235,10 @@ pub async fn perform_predict( let mut result = Arc::into_inner(result).unwrap().into_inner().unwrap(); result.diff = prediction - .and_then(|prediction| prediction.edit_preview.as_unified_diff(&prediction.edits)) + .and_then(|prediction| { + let prediction = prediction.prediction.ok()?; + prediction.edit_preview.as_unified_diff(&prediction.edits) + }) .unwrap_or_default(); anyhow::Ok(result) From 36a3b41f53182c3b528d027de7270d3ea204d0ee Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 26 Nov 2025 17:34:29 -0300 Subject: [PATCH 068/749] edit prediction: Request trigger (#43588) Adds a `trigger` field to the zeta1/zeta2 prediction requests so that we can distinguish between editor, diagnostic, and zeta-cli requests. 
Release Notes: - N/A --- .../cloud_llm_client/src/cloud_llm_client.rs | 11 ++++++ .../cloud_llm_client/src/predict_edits_v3.rs | 4 ++- crates/zeta/src/zeta.rs | 35 ++++++++++++++++--- crates/zeta/src/zeta1.rs | 7 +++- crates/zeta/src/zeta_tests.rs | 2 +- crates/zeta_cli/src/main.rs | 1 + crates/zeta_cli/src/predict.rs | 8 ++++- 7 files changed, 59 insertions(+), 9 deletions(-) diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs index 15b5a4eda4f8473f48cc66d255598cc6c1d09f08..35916bd6801485c8c2bfde9330a47da19025f2c3 100644 --- a/crates/cloud_llm_client/src/cloud_llm_client.rs +++ b/crates/cloud_llm_client/src/cloud_llm_client.rs @@ -169,6 +169,17 @@ pub struct PredictEditsBody { /// Info about the git repository state, only present when can_collect_data is true. #[serde(skip_serializing_if = "Option::is_none", default)] pub git_info: Option, + /// The trigger for this request. + #[serde(default)] + pub trigger: PredictEditsRequestTrigger, +} + +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] +pub enum PredictEditsRequestTrigger { + Diagnostics, + Cli, + #[default] + Other, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index 47e5e71589c806f71725ee4f218ca4a86bee62d0..de8d69dc14870c5583679753c9a75a477e0cc759 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -9,7 +9,7 @@ use std::{ use strum::EnumIter; use uuid::Uuid; -use crate::PredictEditsGitInfo; +use crate::{PredictEditsGitInfo, PredictEditsRequestTrigger}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PlanContextRetrievalRequest { @@ -53,6 +53,8 @@ pub struct PredictEditsRequest { pub prompt_max_bytes: Option, #[serde(default)] pub prompt_format: PromptFormat, + #[serde(default)] + pub trigger: PredictEditsRequestTrigger, } #[derive(Debug, Clone, 
Serialize, Deserialize)] diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 5cf0191e2f8180ea7bcfbef07c046372d2ee22c9..8fda34133343e465b1b56835b116770b856cfe36 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -5,7 +5,8 @@ use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature}; use cloud_llm_client::{ AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejectReason, EditPredictionRejection, MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, - MINIMUM_REQUIRED_VERSION_HEADER_NAME, RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, + MINIMUM_REQUIRED_VERSION_HEADER_NAME, PredictEditsRequestTrigger, RejectEditPredictionsBody, + ZED_VERSION_HEADER_NAME, }; use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; @@ -1016,7 +1017,13 @@ impl Zeta { self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { let Some(request_task) = this .update(cx, |this, cx| { - this.request_prediction(&project, &buffer, position, cx) + this.request_prediction( + &project, + &buffer, + position, + PredictEditsRequestTrigger::Other, + cx, + ) }) .log_err() else { @@ -1083,7 +1090,13 @@ impl Zeta { let Some(prediction_result) = this .update(cx, |this, cx| { - this.request_prediction(&project, &jump_buffer, jump_position, cx) + this.request_prediction( + &project, + &jump_buffer, + jump_position, + PredictEditsRequestTrigger::Diagnostics, + cx, + ) })? .await? 
else { @@ -1264,12 +1277,14 @@ impl Zeta { project: &Entity, active_buffer: &Entity, position: language::Anchor, + trigger: PredictEditsRequestTrigger, cx: &mut Context, ) -> Task>> { self.request_prediction_internal( project.clone(), active_buffer.clone(), position, + trigger, cx.has_flag::(), cx, ) @@ -1280,6 +1295,7 @@ impl Zeta { project: Entity, active_buffer: Entity, position: language::Anchor, + trigger: PredictEditsRequestTrigger, allow_jump: bool, cx: &mut Context, ) -> Task>> { @@ -1305,6 +1321,7 @@ impl Zeta { snapshot.clone(), position, events, + trigger, cx, ), ZetaEditPredictionModel::Zeta2 => self.request_prediction_with_zeta2( @@ -1313,6 +1330,7 @@ impl Zeta { snapshot.clone(), position, events, + trigger, cx, ), ZetaEditPredictionModel::Sweep => self.sweep_ai.request_prediction_with_sweep( @@ -1349,6 +1367,7 @@ impl Zeta { project, jump_buffer, jump_position, + trigger, false, cx, ) @@ -1449,6 +1468,7 @@ impl Zeta { active_snapshot: BufferSnapshot, position: language::Anchor, events: Vec>, + trigger: PredictEditsRequestTrigger, cx: &mut Context, ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); @@ -1621,6 +1641,7 @@ impl Zeta { signatures: vec![], excerpt_parent: None, git_info: None, + trigger, } } ContextMode::Syntax(context_options) => { @@ -1647,6 +1668,7 @@ impl Zeta { index_state.as_deref(), Some(options.max_prompt_bytes), options.prompt_format, + trigger, ) } }; @@ -2416,6 +2438,7 @@ impl Zeta { index_state.as_deref(), Some(options.max_prompt_bytes), options.prompt_format, + PredictEditsRequestTrigger::Other, ) }) }) @@ -2574,6 +2597,7 @@ fn make_syntax_context_cloud_request( index_state: Option<&SyntaxIndexState>, prompt_max_bytes: Option, prompt_format: PromptFormat, + trigger: PredictEditsRequestTrigger, ) -> predict_edits_v3::PredictEditsRequest { let mut signatures = Vec::new(); let mut declaration_to_signature_index = HashMap::default(); @@ -2653,6 +2677,7 @@ fn make_syntax_context_cloud_request( debug_info, 
prompt_max_bytes, prompt_format, + trigger, } } @@ -3072,7 +3097,7 @@ mod tests { let position = snapshot.anchor_before(language::Point::new(1, 3)); let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) + zeta.request_prediction(&project, &buffer, position, Default::default(), cx) }); let (_, respond_tx) = requests.predict.next().await.unwrap(); @@ -3145,7 +3170,7 @@ mod tests { let position = snapshot.anchor_before(language::Point::new(1, 3)); let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) + zeta.request_prediction(&project, &buffer, position, Default::default(), cx) }); let (request, respond_tx) = requests.predict.next().await.unwrap(); diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs index 96d175d5eb11c2c8be40779cf77bfb743d39dff6..0be5fad301242c51c4ad58c60a6d2fcb3441ea08 100644 --- a/crates/zeta/src/zeta1.rs +++ b/crates/zeta/src/zeta1.rs @@ -8,7 +8,8 @@ use crate::{ }; use anyhow::{Context as _, Result}; use cloud_llm_client::{ - PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, predict_edits_v3::Event, + PredictEditsBody, PredictEditsGitInfo, PredictEditsRequestTrigger, PredictEditsResponse, + predict_edits_v3::Event, }; use gpui::{App, AppContext as _, AsyncApp, Context, Entity, SharedString, Task}; use input_excerpt::excerpt_for_cursor_position; @@ -35,6 +36,7 @@ pub(crate) fn request_prediction_with_zeta1( snapshot: BufferSnapshot, position: language::Anchor, events: Vec>, + trigger: PredictEditsRequestTrigger, cx: &mut Context, ) -> Task>> { let buffer = buffer.clone(); @@ -70,6 +72,7 @@ pub(crate) fn request_prediction_with_zeta1( &snapshot, cursor_point, prompt_for_events, + trigger, cx, ); @@ -402,6 +405,7 @@ pub fn gather_context( snapshot: &BufferSnapshot, cursor_point: language::Point, prompt_for_events: impl FnOnce() -> (String, usize) + Send + 'static, + trigger: PredictEditsRequestTrigger, cx: &App, ) -> 
Task> { cx.background_spawn({ @@ -425,6 +429,7 @@ pub fn gather_context( git_info: None, outline: None, speculated_output: None, + trigger, }; Ok(GatherContextOutput { diff --git a/crates/zeta/src/zeta_tests.rs b/crates/zeta/src/zeta_tests.rs index 9b7abb216f5e8e7a9c8bd14a33c2f6ecd9f16174..3549cda36d575a989f5bc4bd5bb8bea6810d3180 100644 --- a/crates/zeta/src/zeta_tests.rs +++ b/crates/zeta/src/zeta_tests.rs @@ -536,7 +536,7 @@ async fn run_edit_prediction( zeta.update(cx, |zeta, cx| zeta.register_buffer(buffer, &project, cx)); cx.background_executor.run_until_parked(); let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, buffer, cursor, cx) + zeta.request_prediction(&project, buffer, cursor, Default::default(), cx) }); prediction_task.await.unwrap().unwrap().prediction.unwrap() } diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index d13f0710cdc4d16666594d25dc639d337fb6bdfc..2d5a23e31f463455871494d123a4988b41b5bd66 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -454,6 +454,7 @@ async fn zeta1_context( &snapshot, clipped_cursor, prompt_for_events, + cloud_llm_client::PredictEditsRequestTrigger::Cli, cx, ) })? diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index c2d68a471fa5de7765c1042473fc8118a3fc9415..99fe65cfa3221a1deb18e767e8faa8e1a1fca0ac 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -226,7 +226,13 @@ pub async fn perform_predict( let prediction = zeta .update(cx, |zeta, cx| { - zeta.request_prediction(&project, &cursor_buffer, cursor_anchor, cx) + zeta.request_prediction( + &project, + &cursor_buffer, + cursor_anchor, + cloud_llm_client::PredictEditsRequestTrigger::Cli, + cx, + ) })? 
.await?; From ae649c66ed45cbb6336a4df35bf234671f088b2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ole=20J=C3=B8rgen=20Br=C3=B8nner?= Date: Wed, 26 Nov 2025 22:16:50 +0100 Subject: [PATCH 069/749] Make key repeat rate on Wayland more precise (2) (#43589) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CLOSES #39042 This is a reopening of #34985+. _Original descrioption_: In Wayland, the client implement key repeat themself. In Zed this is ultimately handled by the gpui crate by inserting a timer source into the event loop which repeat itself if the key is still held down [1]. But it seems the processing of the repeated key event happen synchronously inside the timer source handler, meaning the effective rate become slightly lower (since the repeated timer is scheduled using the 1/rate as delay). I measured the event processing time on my laptop and it's typically around 3ms, but sometimes spiking at 10ms. At low key repeat rates this is probably not _very_ noticeable. I see the default in Zed is set to a (measly) 16/s, but I assume most systems will use something closer to 25, which is a 40ms delay. So ~3ms is around 7.5% of the delay. At higher rate the discrepancy become worse of course. I can visible notice the spikes, and doing some crude stopwatch measurements using gedit as a reference I can reproduce around 5-10% slower rates in Zed. IMO this is significant enough to warrant improving, especially since some people can get quite used the repeat rate and might feel something being "off" in Zed. 
~~The suggested fix simply subtract the processing time from the next delay timer.~~ [1] https://github.com/olejorgenb/zed/blob/32df726f3b7fa83e7399f6629c59e0a3f3fff125/crates/gpui/src/platform/linux/wayland/client.rs#L1355 Release Notes: - Improved Wayland (Linux) key repeat rate precision --- crates/gpui/src/platform/linux/wayland/client.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/gpui/src/platform/linux/wayland/client.rs b/crates/gpui/src/platform/linux/wayland/client.rs index 9a9ec213edd27d9ab7ac2e1437f408ac7d78f08e..a2324648fbb332e75af7df74923806797d93a05a 100644 --- a/crates/gpui/src/platform/linux/wayland/client.rs +++ b/crates/gpui/src/platform/linux/wayland/client.rs @@ -1419,6 +1419,7 @@ impl Dispatch for WaylandClientStatePtr { state.repeat.current_keycode = Some(keycode); let rate = state.repeat.characters_per_second; + let repeat_interval = Duration::from_secs(1) / rate; let id = state.repeat.current_id; state .loop_handle @@ -1428,7 +1429,7 @@ impl Dispatch for WaylandClientStatePtr { is_held: true, prefer_character_input: false, }); - move |_event, _metadata, this| { + move |event_timestamp, _metadata, this| { let mut client = this.get_client(); let mut state = client.borrow_mut(); let is_repeating = id == state.repeat.current_id @@ -1445,7 +1446,8 @@ impl Dispatch for WaylandClientStatePtr { drop(state); focused_window.handle_input(input.clone()); - TimeoutAction::ToDuration(Duration::from_secs(1) / rate) + // If the new scheduled time is in the past the event will repeat as soon as possible + TimeoutAction::ToInstant(event_timestamp + repeat_interval) } }) .unwrap(); From c366627642fdf962d57e63c65f16d62d306beeb3 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 26 Nov 2025 22:42:03 +0100 Subject: [PATCH 070/749] auto-update: Fix auto-update loop with non-nightly channels (#43595) There are 3 factors: 1. 
The Preview channel endpoint does not propagate versions with build identifier (which we oh-so-conveniently store in pre-release field of semver). 2. Preview build, once fetched, sees it's version *with* build identifier (as that's baked into the binary). 3. Auto update logic treats versions with pre-release version as less than versions without pre-release version. This in turn makes any Preview client see itself as versioned like 0.214.4-123-asdf1234455311, whereas the latest version on the endpoint is 0.214.4. Thus, the endpoint version is always more recent than the client version, causing an update loop. The fix is to ignore build identifier when comparing versions of non-nightly channels. This should still let us introduce changes to auto-update behavior in minor releases in the future. Closes #43584 Release Notes: - (Preview only): Fixed an update loop with latest Preview update. --- crates/auto_update/src/auto_update.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 599afcf62d610cfc57a1216f46b1910a88e99bea..1f4d05630653b0dd8038eab4279ae597ec6d2fbe 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -717,9 +717,12 @@ impl AutoUpdater { } fn check_if_fetched_version_is_newer_non_nightly( - installed_version: Version, + mut installed_version: Version, fetched_version: Version, ) -> Result> { + // For non-nightly releases, ignore build and pre-release fields as they're not provided by our endpoints right now. 
+ installed_version.build = semver::BuildMetadata::EMPTY; + installed_version.pre = semver::Prerelease::EMPTY; let should_download = fetched_version > installed_version; let newer_version = should_download.then(|| VersionCheckType::Semantic(fetched_version)); Ok(newer_version) From 958f1098b77ca11a010fae38bd9d5cbfd8a384d5 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 26 Nov 2025 23:51:11 +0100 Subject: [PATCH 071/749] editor: Consider experimental theme overrides for colorized bracket invalidation (#43602) Release Notes: - Fixed a small issue where bracket colors would not be immediately updated if `experimental_theme_overrides.accents` was changed. --- crates/editor/src/editor.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index cd7a872f8c129c3b67b544ed2ba78d7fde104b48..7cdd587db48de1f03bf54949c3bfbe7870a07073 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -21565,12 +21565,22 @@ impl Editor { return Vec::new(); } - theme::ThemeSettings::get_global(cx) + let theme_settings = theme::ThemeSettings::get_global(cx); + + theme_settings .theme_overrides .get(cx.theme().name.as_ref()) .map(|theme_style| &theme_style.accents) .into_iter() .flatten() + .chain( + theme_settings + .experimental_theme_overrides + .as_ref() + .map(|overrides| &overrides.accents) + .into_iter() + .flatten(), + ) .flat_map(|accent| accent.0.clone()) .collect() } From 54309f4a4823ea1db344e3e48a28fd2687e310bb Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Thu, 27 Nov 2025 01:22:13 +0200 Subject: [PATCH 072/749] Account for greedy tree-sitter bracket matches (#43607) Current approach is to colorize brackets based on their depth, which was broken for markdown: image Markdown grammar, for bracket queries https://github.com/zed-industries/zed/blob/00e93bfa113a3daed6e4a97a7244ad04d58453ee/crates/languages/src/markdown/brackets.scm#L1-L8 and markdown document 
`[LLM-powered features](./ai/overview.md), [bring and configure your own API keys](./ai/llm-providers.md#use-your-own-keys)`, matches first bracket (offset 0) with two different ones: * `[LLM-powered features]` * `[LLM-powered features](./ai/overview.md), [bring and configure your own API keys]` which mix and add different color markers. Now, in case multiple pairs exist for the same first bracket, Zed will only colorize the shortest one: image Release Notes: - Fixed bracket colorization mixing colors in markdown files --- crates/editor/src/bracket_colorization.rs | 27 +++++++- crates/language/src/buffer.rs | 77 ++++++++++++++++------- 2 files changed, 81 insertions(+), 23 deletions(-) diff --git a/crates/editor/src/bracket_colorization.rs b/crates/editor/src/bracket_colorization.rs index 902ec2b7702b945bb482e4e4700cf37b36ae907b..65d8c139e99437e37d0c18551dd01475ac824bfd 100644 --- a/crates/editor/src/bracket_colorization.rs +++ b/crates/editor/src/bracket_colorization.rs @@ -161,7 +161,7 @@ mod tests { use gpui::{AppContext as _, UpdateGlobal as _}; use indoc::indoc; use itertools::Itertools; - use language::Capability; + use language::{Capability, markdown_lang}; use languages::rust_lang; use multi_buffer::{ExcerptRange, MultiBuffer}; use pretty_assertions::assert_eq; @@ -261,6 +261,31 @@ where ); } + #[gpui::test] + async fn test_markdown_bracket_colorization(cx: &mut gpui::TestAppContext) { + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let mut cx = EditorLspTestContext::new( + Arc::into_inner(markdown_lang()).unwrap(), + lsp::ServerCapabilities::default(), + cx, + ) + .await; + + cx.set_state(indoc! 
{r#"ˇ[LLM-powered features](./ai/overview.md), [bring and configure your own API keys](./ai/llm-providers.md#use-your-own-keys)"#}); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + assert_eq!( + r#"«1[LLM-powered features]1»«1(./ai/overview.md)1», «1[bring and configure your own API keys]1»«1(./ai/llm-providers.md#use-your-own-keys)1» +1 hsla(207.80, 16.20%, 69.19%, 1.00) +"#, + &bracket_colors_markup(&mut cx), + "All markdown brackets should be colored based on their depth" + ); + } + #[gpui::test] async fn test_bracket_colorization_when_editing(cx: &mut gpui::TestAppContext) { init_test(cx, |language_settings| { diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index c599a4751b60f150e31b7ddf6e32a6234a510c74..746992fa7e59650e5af59887630f8c2ee1b39450 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -45,12 +45,12 @@ use std::{ borrow::Cow, cell::Cell, cmp::{self, Ordering, Reverse}, - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, hash_map}, future::Future, iter::{self, Iterator, Peekable}, mem, num::NonZeroU32, - ops::{Deref, Not, Range}, + ops::{Deref, Range}, path::PathBuf, rc, sync::{Arc, LazyLock}, @@ -4236,6 +4236,7 @@ impl BufferSnapshot { let mut new_bracket_matches = HashMap::default(); let mut all_bracket_matches = HashMap::default(); + let mut bracket_matches_to_color = HashMap::default(); for chunk in tree_sitter_data .chunks @@ -4265,7 +4266,7 @@ impl BufferSnapshot { .collect::>(); let chunk_range = chunk_range.clone(); - let new_matches = iter::from_fn(move || { + let tree_sitter_matches = iter::from_fn(|| { while let Some(mat) = matches.peek() { let mut open = None; let mut close = None; @@ -4291,32 +4292,64 @@ impl BufferSnapshot { continue; } + if !pattern.rainbow_exclude { + // Certain tree-sitter grammars may return more bracket pairs than needed: + // see `test_markdown_bracket_colorization` for a set-up that 
returns pairs with the same start bracket and different end one. + // Pick the pair with the shortest range in case of ambiguity. + match bracket_matches_to_color.entry(open_range.clone()) { + hash_map::Entry::Vacant(v) => { + v.insert(close_range.clone()); + } + hash_map::Entry::Occupied(mut o) => { + let previous_close_range = o.get(); + let previous_length = + previous_close_range.end - open_range.start; + let new_length = close_range.end - open_range.start; + if new_length < previous_length { + o.insert(close_range.clone()); + } + } + } + } return Some((open_range, close_range, pattern, depth)); } None }) .sorted_by_key(|(open_range, _, _, _)| open_range.start) - .map(|(open_range, close_range, pattern, syntax_layer_depth)| { - while let Some(&last_bracket_end) = bracket_pairs_ends.last() { - if last_bracket_end <= open_range.start { - bracket_pairs_ends.pop(); - } else { - break; - } - } + .collect::>(); - let bracket_depth = bracket_pairs_ends.len(); - bracket_pairs_ends.push(close_range.end); + let new_matches = tree_sitter_matches + .into_iter() + .map(|(open_range, close_range, pattern, syntax_layer_depth)| { + let participates_in_coloring = + bracket_matches_to_color.get(&open_range).is_some_and( + |close_range_to_color| close_range_to_color == &close_range, + ); + let color_index = if participates_in_coloring { + while let Some(&last_bracket_end) = bracket_pairs_ends.last() { + if last_bracket_end <= open_range.start { + bracket_pairs_ends.pop(); + } else { + break; + } + } - BracketMatch { - open_range, - close_range, - syntax_layer_depth, - newline_only: pattern.newline_only, - color_index: pattern.rainbow_exclude.not().then_some(bracket_depth), - } - }) - .collect::>(); + let bracket_depth = bracket_pairs_ends.len(); + bracket_pairs_ends.push(close_range.end); + Some(bracket_depth) + } else { + None + }; + + BracketMatch { + open_range, + close_range, + syntax_layer_depth, + newline_only: pattern.newline_only, + color_index, + } + }) + .collect::>(); 
new_bracket_matches.insert(chunk.id, new_matches.clone()); new_matches From 8c355b5eeeee1cad71a3f584dba95c1ba7f62a7f Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Thu, 27 Nov 2025 00:39:18 +0100 Subject: [PATCH 073/749] Improve `extension_bump` workflow (#43612) This extends the extension CI workflow to create a tag once the version is bumped on main. Release Notes: - N/A --- .github/workflows/extension_bump.yml | 33 +++++++- .../src/tasks/workflows/extension_bump.rs | 84 ++++++++++++++++--- 2 files changed, 104 insertions(+), 13 deletions(-) diff --git a/.github/workflows/extension_bump.yml b/.github/workflows/extension_bump.yml index 5933ab7fbb2fab753cbda729c82026102e395539..779116ec5b2ea4c766212ea2b61993eb82693992 100644 --- a/.github/workflows/extension_bump.yml +++ b/.github/workflows/extension_bump.yml @@ -59,7 +59,7 @@ jobs: fetch-depth: 10 - id: compare-versions-check name: extension_bump::compare_versions - run: |+ + run: | CURRENT_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" git checkout "$(git log -1 --format=%H)"~1 @@ -70,9 +70,11 @@ jobs: echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ echo "needs_bump=false" >> "$GITHUB_OUTPUT" + echo "current_version=${CURRENT_VERSION}" >> "$GITHUB_OUTPUT" shell: bash -euxo pipefail {0} outputs: needs_bump: ${{ steps.compare-versions-check.outputs.needs_bump }} + current_version: ${{ steps.compare-versions-check.outputs.current_version }} timeout-minutes: 1 bump_extension_version: needs: @@ -131,6 +133,35 @@ jobs: token: ${{ steps.generate-token.outputs.token }} sign-commits: true timeout-minutes: 1 + create_version_label: + needs: + - check_extension + - check_bump_needed + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && needs.check_bump_needed.outputs.needs_bump == 'false' + runs-on: namespace-profile-8x16-ubuntu-2204 + steps: + - id: generate-token + name: extension_bump::generate_token + uses: actions/create-github-app-token@v2 + with: + 
app-id: ${{ secrets.app-id }} + private-key: ${{ secrets.app-secret }} + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - name: extension_bump::create_version_tag + uses: actions/github-script@v7 + with: + script: |- + github.rest.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: 'refs/tags/v${{ needs.check_bump_needed.outputs.current_version }}', + sha: context.sha + }) + github-token: ${{ steps.generate-token.outputs.token }} + timeout-minutes: 1 concurrency: group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} cancel-in-progress: true diff --git a/tooling/xtask/src/tasks/workflows/extension_bump.rs b/tooling/xtask/src/tasks/workflows/extension_bump.rs index 66de1f86aa998269abc24f1de375dbe1800acc31..85e7dbeceed0d05f60f04dbf055553f71228ce54 100644 --- a/tooling/xtask/src/tasks/workflows/extension_bump.rs +++ b/tooling/xtask/src/tasks/workflows/extension_bump.rs @@ -31,11 +31,19 @@ pub(crate) fn extension_bump() -> Workflow { WorkflowSecret::new("app-secret", "The app secret for the corresponding app ID"); let test_extension = extension_tests::check_extension(); - let (check_bump_needed, needs_bump) = check_bump_needed(); - let bump_version = bump_extension_version( - &[&test_extension, &check_bump_needed], - &bump_type, - needs_bump.as_job_output(&check_bump_needed), + let (check_bump_needed, needs_bump, current_version) = check_bump_needed(); + + let needs_bump = needs_bump.as_job_output(&check_bump_needed); + let current_version = current_version.as_job_output(&check_bump_needed); + + let dependencies = [&test_extension, &check_bump_needed]; + + let bump_version = + bump_extension_version(&dependencies, &bump_type, &needs_bump, &app_id, &app_secret); + let create_label = create_version_label( + &dependencies, + &needs_bump, + ¤t_version, &app_id, &app_secret, ); @@ -65,24 +73,74 @@ pub(crate) fn 
extension_bump() -> Workflow { .add_job(test_extension.name, test_extension.job) .add_job(check_bump_needed.name, check_bump_needed.job) .add_job(bump_version.name, bump_version.job) + .add_job(create_label.name, create_label.job) } -fn check_bump_needed() -> (NamedJob, StepOutput) { - let (compare_versions, version_changed) = compare_versions(); +fn check_bump_needed() -> (NamedJob, StepOutput, StepOutput) { + let (compare_versions, version_changed, current_version) = compare_versions(); let job = Job::default() .with_repository_owner_guard() - .outputs([(version_changed.name.to_owned(), version_changed.to_string())]) + .outputs([ + (version_changed.name.to_owned(), version_changed.to_string()), + ( + current_version.name.to_string(), + current_version.to_string(), + ), + ]) .runs_on(runners::LINUX_SMALL) .timeout_minutes(1u32) .add_step(steps::checkout_repo().add_with(("fetch-depth", 10))) .add_step(compare_versions); - (named::job(job), version_changed) + (named::job(job), version_changed, current_version) +} + +fn create_version_label( + dependencies: &[&NamedJob], + needs_bump: &JobOutput, + current_version: &JobOutput, + app_id: &WorkflowSecret, + app_secret: &WorkflowSecret, +) -> NamedJob { + let (generate_token, generated_token) = generate_token(app_id, app_secret); + let job = steps::dependant_job(dependencies) + .cond(Expression::new(format!( + "{DEFAULT_REPOSITORY_OWNER_GUARD} && {} == 'false'", + needs_bump.expr(), + ))) + .runs_on(runners::LINUX_LARGE) + .timeout_minutes(1u32) + .add_step(generate_token) + .add_step(steps::checkout_repo()) + .add_step(create_version_tag(current_version, generated_token)); + + named::job(job) +} + +fn create_version_tag(current_version: &JobOutput, generated_token: StepOutput) -> Step { + named::uses("actions", "github-script", "v7").with( + Input::default() + .add( + "script", + format!( + indoc! 
{r#" + github.rest.git.createRef({{ + owner: context.repo.owner, + repo: context.repo.repo, + ref: 'refs/tags/v{}', + sha: context.sha + }})"# + }, + current_version + ), + ) + .add("github-token", generated_token.to_string()), + ) } /// Compares the current and previous commit and checks whether versions changed inbetween. -fn compare_versions() -> (Step, StepOutput) { +fn compare_versions() -> (Step, StepOutput, StepOutput) { let check_needs_bump = named::bash(format!( indoc! { r#" @@ -96,6 +154,7 @@ fn compare_versions() -> (Step, StepOutput) { echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ echo "needs_bump=false" >> "$GITHUB_OUTPUT" + echo "current_version=${{CURRENT_VERSION}}" >> "$GITHUB_OUTPUT" "# }, VERSION_CHECK, VERSION_CHECK @@ -103,14 +162,15 @@ fn compare_versions() -> (Step, StepOutput) { .id("compare-versions-check"); let needs_bump = StepOutput::new(&check_needs_bump, "needs_bump"); + let current_version = StepOutput::new(&check_needs_bump, "current_version"); - (check_needs_bump, needs_bump) + (check_needs_bump, needs_bump, current_version) } fn bump_extension_version( dependencies: &[&NamedJob], bump_type: &WorkflowInput, - needs_bump: JobOutput, + needs_bump: &JobOutput, app_id: &WorkflowSecret, app_secret: &WorkflowSecret, ) -> NamedJob { From 82b768258f966c157ed960d19ae2d4bdce81779f Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 27 Nov 2025 00:55:11 +0100 Subject: [PATCH 074/749] remote: Do not include prerelease and build meta in asset queries (#43611) Closes #43580 Release Notes: - (Preview only) Fixed failures to fetch remoting server (needed to run remoting). 
--- crates/auto_update/src/auto_update.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 1f4d05630653b0dd8038eab4279ae597ec6d2fbe..06e87c53dba2acfffb03cdc75cbbaa7cc2f44a6a 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -510,7 +510,9 @@ impl AutoUpdater { (None, None, None) }; - let version = if let Some(version) = version { + let version = if let Some(mut version) = version { + version.pre = semver::Prerelease::EMPTY; + version.build = semver::BuildMetadata::EMPTY; version.to_string() } else { "latest".to_string() From 91400e74897898eeb5026d5bea5b35666c773c96 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Thu, 27 Nov 2025 10:37:34 +0200 Subject: [PATCH 075/749] Do not color html tags and other "long" "bracket pairs" (#43644) Closes https://github.com/zed-industries/zed/issues/43621 Follow-up of https://github.com/zed-industries/zed/pull/43172 Release Notes: - (Preview only) Fixed html tags incorrectly colorized --- crates/language/src/buffer.rs | 12 ++++++-- crates/language/src/language.rs | 49 ++++++++------------------------- 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 746992fa7e59650e5af59887630f8c2ee1b39450..66967f9a3357e13485b8228b06874804a8768fac 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -4292,7 +4292,13 @@ impl BufferSnapshot { continue; } - if !pattern.rainbow_exclude { + if !pattern.rainbow_exclude + // Also, certain languages have "brackets" that are not brackets, e.g. tags. and such + // bracket will match the entire tag with all text inside. + // For now, avoid highlighting any pair that has more than single char in each bracket. + // We need to colorize `` bracket pairs, so cannot make this check stricter. 
+ && (open_range.len() == 1 || close_range.len() == 1) + { // Certain tree-sitter grammars may return more bracket pairs than needed: // see `test_markdown_bracket_colorization` for a set-up that returns pairs with the same start bracket and different end one. // Pick the pair with the shortest range in case of ambiguity. @@ -4321,11 +4327,11 @@ impl BufferSnapshot { let new_matches = tree_sitter_matches .into_iter() .map(|(open_range, close_range, pattern, syntax_layer_depth)| { - let participates_in_coloring = + let participates_in_colorizing = bracket_matches_to_color.get(&open_range).is_some_and( |close_range_to_color| close_range_to_color == &close_range, ); - let color_index = if participates_in_coloring { + let color_index = if participates_in_colorizing { while let Some(&last_bracket_end) = bracket_pairs_ends.last() { if last_bracket_end <= open_range.start { bracket_pairs_ends.pop(); diff --git a/crates/language/src/language.rs b/crates/language/src/language.rs index 03e563e145c3bd1cde63e62fa8a09a4fb0228f0f..c0a280767395dd83e8a36e554294d7d044b64e2e 100644 --- a/crates/language/src/language.rs +++ b/crates/language/src/language.rs @@ -2643,42 +2643,15 @@ pub fn rust_lang() -> Arc { outline: Some(Cow::from(include_str!( "../../languages/src/rust/outline.scm" ))), - indents: Some(Cow::from( - r#" -[ - ((where_clause) _ @end) - (field_expression) - (call_expression) - (assignment_expression) - (let_declaration) - (let_chain) - (await_expression) -] @indent - -(_ "[" "]" @end) @indent -(_ "<" ">" @end) @indent -(_ "{" "}" @end) @indent -(_ "(" ")" @end) @indent"#, - )), - brackets: Some(Cow::from( - r#" -("(" @open ")" @close) -("[" @open "]" @close) -("{" @open "}" @close) -("<" @open ">" @close) -(closure_parameters "|" @open "|" @close) -(("\"" @open "\"" @close) (#set! rainbow.exclude)) -(("'" @open "'" @close) (#set! 
rainbow.exclude))"#, - )), - text_objects: Some(Cow::from( - r#" -(function_item - body: (_ - "{" - (_)* @function.inside - "}" )) @function.around - "#, - )), + indents: Some(Cow::from(include_str!( + "../../languages/src/rust/indents.scm" + ))), + brackets: Some(Cow::from(include_str!( + "../../languages/src/rust/brackets.scm" + ))), + text_objects: Some(Cow::from(include_str!( + "../../languages/src/rust/textobjects.scm" + ))), ..LanguageQueries::default() }) .expect("Could not parse queries"); @@ -2697,7 +2670,7 @@ pub fn markdown_lang() -> Arc { path_suffixes: vec!["md".into()], ..Default::default() }, - ..Default::default() + ..LanguageConfig::default() }, Some(tree_sitter_md::LANGUAGE.into()), ) @@ -2708,7 +2681,7 @@ pub fn markdown_lang() -> Arc { injections: Some(Cow::from(include_str!( "../../languages/src/markdown/injections.scm" ))), - ..Default::default() + ..LanguageQueries::default() }) .expect("Could not parse markdown queries"); Arc::new(language) From 99d7b2fa1de8c9d5b6d33db0d4c90ba1571e2f8e Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Thu, 27 Nov 2025 11:10:35 +0200 Subject: [PATCH 076/749] zeta2: Compute diff-aware chrF metric (#43485) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Zeta evals now include a character n-gram metric adapted for multi-edit diffs (“delta chrF”). It works as follows: 1. Reconstruct the original, golden (expected), and actual texts from unified diffs. - "original": the text before any edits - "golden": the text after applying the expected edits - "actual": the text after applying the actual edits 2. Compute n-gram count deltas between original→golden and original→actual. - n-grams are computed as in chrF (max n=6, whitespace ignored). 3. Compare these deltas to assess how well the actual edits match the expected edits. - As in standard chrF, classify n-grams as true positives, false positives, and false negatives, and report the F-beta score with beta=2. 
Release Notes: - N/A --- crates/language/src/buffer.rs | 4 +- crates/zeta_cli/src/evaluate.rs | 203 +++++++---------- crates/zeta_cli/src/main.rs | 1 + crates/zeta_cli/src/metrics.rs | 380 ++++++++++++++++++++++++++++++++ 4 files changed, 464 insertions(+), 124 deletions(-) create mode 100644 crates/zeta_cli/src/metrics.rs diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 66967f9a3357e13485b8228b06874804a8768fac..7d713d515b2ae9584bc922d08d5811155f83d3a8 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -758,8 +758,8 @@ impl EditPreview { .to_point(&self.applied_edits_snapshot); let start = Point::new(start.row.saturating_sub(3), 0); - let old_end = Point::new(old_end.row + 3, 0).min(self.old_snapshot.max_point()); - let new_end = Point::new(new_end.row + 3, 0).min(self.applied_edits_snapshot.max_point()); + let old_end = Point::new(old_end.row + 4, 0).min(self.old_snapshot.max_point()); + let new_end = Point::new(new_end.row + 4, 0).min(self.applied_edits_snapshot.max_point()); Some(unified_diff( &self diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 6726dcb3aafdeff7fe41cbbbc49850c1e7465cf4..043844768557ad46f61d5fd0d809e1e85c62574f 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,3 +1,4 @@ +use crate::metrics::{self, Scores}; use std::{ collections::HashMap, io::{IsTerminal, Write}, @@ -5,7 +6,6 @@ use std::{ }; use anyhow::Result; -use collections::HashSet; use gpui::{AsyncApp, Entity}; use project::Project; use util::ResultExt as _; @@ -119,13 +119,14 @@ fn write_aggregated_scores( } if successful.len() > 1 { - let mut edit_predictions = successful + let edit_scores = successful .iter() - .filter_map(|r| r.edit_prediction.as_ref()) - .peekable(); - let has_edit_predictions = edit_predictions.peek().is_some(); + .filter_map(|r| r.edit_scores.clone()) + .collect::>(); + let has_edit_predictions = edit_scores.len() > 0; let 
aggregated_result = EvaluationResult { - edit_prediction: has_edit_predictions.then(|| Scores::aggregate(edit_predictions)), + context_scores: Scores::aggregate(successful.iter().map(|r| &r.context_scores)), + edit_scores: has_edit_predictions.then(|| EditScores::aggregate(&edit_scores)), prompt_len: successful.iter().map(|r| r.prompt_len).sum::() / successful.len(), generated_len: successful.iter().map(|r| r.generated_len).sum::() / successful.len(), @@ -247,94 +248,27 @@ fn write_eval_result( anyhow::Ok(()) } -#[derive(Debug, Default)] -pub struct EvaluationResult { - pub edit_prediction: Option, - pub prompt_len: usize, - pub generated_len: usize, -} - -#[derive(Default, Debug)] -pub struct Scores { - pub true_positives: usize, - pub false_positives: usize, - pub false_negatives: usize, +#[derive(Debug, Default, Clone)] +pub struct EditScores { + pub line_match: Scores, + pub chr_f: f64, } -impl Scores { - pub fn new(expected: &HashSet, actual: &HashSet) -> Scores { - let true_positives = expected.intersection(actual).count(); - let false_positives = actual.difference(expected).count(); - let false_negatives = expected.difference(actual).count(); - - Scores { - true_positives, - false_positives, - false_negatives, - } - } - - pub fn to_markdown(&self) -> String { - format!( - " -Precision : {:.4} -Recall : {:.4} -F1 Score : {:.4} -True Positives : {} -False Positives : {} -False Negatives : {}", - self.precision(), - self.recall(), - self.f1_score(), - self.true_positives, - self.false_positives, - self.false_negatives - ) - } - - pub fn aggregate<'a>(scores: impl Iterator) -> Scores { - let mut true_positives = 0; - let mut false_positives = 0; - let mut false_negatives = 0; - - for score in scores { - true_positives += score.true_positives; - false_positives += score.false_positives; - false_negatives += score.false_negatives; - } - - Scores { - true_positives, - false_positives, - false_negatives, - } - } +impl EditScores { + pub fn aggregate(scores: 
&[EditScores]) -> EditScores { + let line_match = Scores::aggregate(scores.iter().map(|s| &s.line_match)); + let chr_f = scores.iter().map(|s| s.chr_f).sum::() / scores.len() as f64; - pub fn precision(&self) -> f64 { - if self.true_positives + self.false_positives == 0 { - 0.0 - } else { - self.true_positives as f64 / (self.true_positives + self.false_positives) as f64 - } - } - - pub fn recall(&self) -> f64 { - if self.true_positives + self.false_negatives == 0 { - 0.0 - } else { - self.true_positives as f64 / (self.true_positives + self.false_negatives) as f64 - } + EditScores { line_match, chr_f } } +} - pub fn f1_score(&self) -> f64 { - let recall = self.recall(); - let precision = self.precision(); - if precision + recall == 0.0 { - 0.0 - } else { - 2.0 * precision * recall / (precision + recall) - } - } +#[derive(Debug, Default)] +pub struct EvaluationResult { + pub edit_scores: Option, + pub context_scores: Scores, + pub prompt_len: usize, + pub generated_len: usize, } impl std::fmt::Display for EvaluationResult { @@ -349,40 +283,74 @@ impl std::fmt::Display for EvaluationResult { impl EvaluationResult { fn fmt_markdown(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if let Some(prediction) = &self.edit_prediction { + write!( + f, + r#" +### Context Scores +{} +"#, + self.context_scores.to_markdown(), + )?; + if let Some(scores) = &self.edit_scores { write!( f, r#" ### Edit Prediction Scores {}"#, - prediction.to_markdown() + scores.line_match.to_markdown() )?; } Ok(()) } fn fmt_table(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "### Scores\n")?; + writeln!(f, "#### Prompt Statistics")?; + writeln!(f, "─────────────────────────")?; + writeln!(f, "Prompt_len Generated_len")?; + writeln!(f, "─────────────────────────")?; + writeln!(f, "{:<11} {:<14}", self.prompt_len, self.generated_len,)?; + writeln!(f)?; + writeln!(f)?; + writeln!(f, "#### Performance Scores")?; writeln!( f, - " Prompt Generated TP FP FN 
Precision Recall F1" + "──────────────────────────────────────────────────────────────────" )?; writeln!( f, - "───────────────────────────────────────────────────────────────────────────────────────────────" + " TP FP FN Precision Recall F1" )?; - if let Some(edit_prediction) = &self.edit_prediction { + writeln!( + f, + "──────────────────────────────────────────────────────────────────" + )?; + writeln!( + f, + "Context Retrieval {:<6} {:<6} {:<6} {:>8.2} {:>7.2} {:>6.2}", + self.context_scores.true_positives, + self.context_scores.false_positives, + self.context_scores.false_negatives, + self.context_scores.precision() * 100.0, + self.context_scores.recall() * 100.0, + self.context_scores.f1_score() * 100.0 + )?; + if let Some(edit_scores) = &self.edit_scores { + let line_match = &edit_scores.line_match; + writeln!(f, "Edit Prediction")?; writeln!( f, - "Edit Prediction {:<7} {:<9} {:<6} {:<6} {:<6} {:>9.2} {:>8.2} {:>7.2}", - self.prompt_len, - self.generated_len, - edit_prediction.true_positives, - edit_prediction.false_positives, - edit_prediction.false_negatives, - edit_prediction.precision() * 100.0, - edit_prediction.recall() * 100.0, - edit_prediction.f1_score() * 100.0 + " ├─ exact lines {:<6} {:<6} {:<6} {:>8.2} {:>7.2} {:>6.2}", + line_match.true_positives, + line_match.false_positives, + line_match.false_negatives, + line_match.precision() * 100.0, + line_match.recall() * 100.0, + line_match.f1_score() * 100.0 + )?; + writeln!( + f, + " └─ diff chrF {:<6} {:<6} {:<6} {:>8} {:>8} {:>6.2}", + "-", "-", "-", "-", "-", edit_scores.chr_f )?; } Ok(()) @@ -403,21 +371,12 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval .lines() .map(DiffLine::parse) .collect::>(); - let expected_patch_lines = expected_patch - .iter() - .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) - .map(|line| line.to_string()) - .collect(); + let actual_patch = preds.diff.lines().map(DiffLine::parse).collect::>(); - let 
actual_patch_lines = preds - .diff - .lines() - .map(DiffLine::parse) - .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) - .map(|line| line.to_string()) - .collect(); + let line_match = metrics::line_match_score(&expected_patch, &actual_patch); + let chr_f = metrics::delta_chr_f(&expected_patch, &actual_patch); - eval_result.edit_prediction = Some(Scores::new(&expected_patch_lines, &actual_patch_lines)); + eval_result.edit_scores = Some(EditScores { line_match, chr_f }); } eval_result @@ -500,12 +459,12 @@ fn write_bucketed_analysis( diff: execution_data.diff.clone(), is_correct: { evaluation_result - .edit_prediction + .edit_scores .as_ref() - .map_or(false, |edit_prediction| { - edit_prediction.false_positives == 0 - && edit_prediction.false_negatives == 0 - && edit_prediction.true_positives > 0 + .map_or(false, |edit_scores| { + edit_scores.line_match.false_positives == 0 + && edit_scores.line_match.false_negatives == 0 + && edit_scores.line_match.true_positives > 0 }) }, execution_indices: vec![execution_data.execution_id.clone()], diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 2d5a23e31f463455871494d123a4988b41b5bd66..d72a0f5cf7cf00166a2bbaa60c1700d1007fc8af 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -1,6 +1,7 @@ mod evaluate; mod example; mod headless; +mod metrics; mod paths; mod predict; mod source_location; diff --git a/crates/zeta_cli/src/metrics.rs b/crates/zeta_cli/src/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..dd08459678eef6d04a6b656d19a4572d51a5b5c1 --- /dev/null +++ b/crates/zeta_cli/src/metrics.rs @@ -0,0 +1,380 @@ +use collections::{HashMap, HashSet}; +use zeta::udiff::DiffLine; + +type Counts = HashMap; +type CountsDelta = HashMap; + +#[derive(Default, Debug, Clone)] +pub struct Scores { + pub true_positives: usize, + pub false_positives: usize, + pub false_negatives: usize, +} + +impl Scores { + pub fn 
from_sets(expected: &HashSet, actual: &HashSet) -> Scores { + let true_positives = expected.intersection(actual).count(); + let false_positives = actual.difference(expected).count(); + let false_negatives = expected.difference(actual).count(); + + Scores { + true_positives, + false_positives, + false_negatives, + } + } + + pub fn from_counts(expected: &Counts, actual: &Counts) -> Scores { + let mut true_positives = 0; + let mut false_positives = 0; + let mut false_negatives = 0; + + for (ngram, &expected_count) in expected { + let actual_count = *actual.get(ngram).unwrap_or(&0); + if actual_count > expected_count { + false_positives += actual_count - expected_count; + } else { + false_negatives += expected_count - actual_count; + } + true_positives += expected_count.min(actual_count); + } + + for (ngram, &actual_count) in actual { + if !expected.contains_key(ngram) { + false_positives += actual_count; + } + } + + Scores { + true_positives, + false_positives, + false_negatives, + } + } + + pub fn to_markdown(&self) -> String { + format!( + " +Precision : {:.4} +Recall : {:.4} +F1 Score : {:.4} +True Positives : {} +False Positives : {} +False Negatives : {}", + self.precision(), + self.recall(), + self.f1_score(), + self.true_positives, + self.false_positives, + self.false_negatives + ) + } + + pub fn aggregate<'a>(scores: impl Iterator) -> Scores { + let mut true_positives = 0; + let mut false_positives = 0; + let mut false_negatives = 0; + + for score in scores { + true_positives += score.true_positives; + false_positives += score.false_positives; + false_negatives += score.false_negatives; + } + + Scores { + true_positives, + false_positives, + false_negatives, + } + } + + pub fn precision(&self) -> f64 { + if self.true_positives + self.false_positives == 0 { + 0.0 + } else { + self.true_positives as f64 / (self.true_positives + self.false_positives) as f64 + } + } + + pub fn recall(&self) -> f64 { + if self.true_positives + self.false_negatives == 0 { + 0.0 + } 
else { + self.true_positives as f64 / (self.true_positives + self.false_negatives) as f64 + } + } + + pub fn f1_score(&self) -> f64 { + let recall = self.recall(); + let precision = self.precision(); + if precision + recall == 0.0 { + 0.0 + } else { + 2.0 * precision * recall / (precision + recall) + } + } +} + +pub fn line_match_score(expected_patch: &[DiffLine], actual_patch: &[DiffLine]) -> Scores { + let expected_change_lines = expected_patch + .iter() + .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) + .map(|line| line.to_string()) + .collect(); + + let actual_change_lines = actual_patch + .iter() + .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) + .map(|line| line.to_string()) + .collect(); + + Scores::from_sets(&expected_change_lines, &actual_change_lines) +} + +enum ChrfWhitespace { + #[allow(unused)] + Unchanged, + Ignore, +} + +const CHR_F_CHAR_ORDER: usize = 6; +const CHR_F_BETA: f64 = 2.0; +const CHR_F_WHITESPACE: ChrfWhitespace = ChrfWhitespace::Ignore; + +/// Computes a delta-chrF score that compares two sets of edits. +/// +/// This metric works by: +/// 1. Reconstructing original, golden (expected result), and actual texts from diffs +/// 2. Computing n-gram count differences (deltas) between original→golden and original→actual +/// 3. 
Comparing these deltas to measure how well actual edits match expected edits +pub fn delta_chr_f(expected: &[DiffLine], actual: &[DiffLine]) -> f64 { + // Reconstruct texts from diffs + let mut original_text = String::new(); // state of the text before any edits + let mut golden_text = String::new(); // text after applying golden edits + let mut actual_text = String::new(); // text after applying actual edits + + for line in expected { + match line { + DiffLine::Context(s) => { + original_text.push_str(s); + golden_text.push_str(s); + } + DiffLine::Deletion(s) => { + original_text.push_str(s); + } + DiffLine::Addition(s) => { + golden_text.push_str(s); + } + _ => {} + } + } + + for line in actual { + match line { + DiffLine::Context(s) | DiffLine::Addition(s) => { + actual_text.push_str(s); + } + _ => {} + } + } + + // Edge case + if original_text == golden_text && golden_text == actual_text { + return 100.0; + } + + // Compute the metric + let original_ngrams = chr_f_ngram_counts(&original_text); + let golden_ngrams = chr_f_ngram_counts(&golden_text); + let actual_ngrams = chr_f_ngram_counts(&actual_text); + + let mut total_precision = 0.0; + let mut total_recall = 0.0; + + for order in 0..CHR_F_CHAR_ORDER { + let expected_delta = compute_ngram_delta(&golden_ngrams[order], &original_ngrams[order]); + let actual_delta = compute_ngram_delta(&actual_ngrams[order], &original_ngrams[order]); + + if expected_delta.is_empty() && actual_delta.is_empty() { + total_precision += 1.0; + total_recall += 1.0; + continue; + } + + let expected_counts = ngram_delta_to_counts(&expected_delta); + let actual_counts = ngram_delta_to_counts(&actual_delta); + + let score = Scores::from_counts(&expected_counts, &actual_counts); + total_precision += score.precision(); + total_recall += score.recall(); + } + + let prec = total_precision / CHR_F_CHAR_ORDER as f64; + let recall = total_recall / CHR_F_CHAR_ORDER as f64; + let f_score = if prec + recall == 0.0 { + 0.0 + } else { + (1.0 + 
CHR_F_BETA * CHR_F_BETA) * prec * recall / (CHR_F_BETA * CHR_F_BETA * prec + recall) + }; + + f_score * 100.0 +} + +fn chr_f_ngram_counts(text: &str) -> Vec { + // Ignore whitespace. The original chrF implementation skips all + // whitespace. We should consider compressing multiple consecutive + // spaces into one -- this may reflect our task more closely. + let text = match CHR_F_WHITESPACE { + ChrfWhitespace::Unchanged => text.to_string(), + ChrfWhitespace::Ignore => text + .chars() + .filter(|c| !c.is_whitespace()) + .collect::(), + }; + + (1..=CHR_F_CHAR_ORDER) + .map(|order| count_ngrams(&text, order)) + .collect() +} + +fn compute_ngram_delta(after: &Counts, before: &Counts) -> CountsDelta { + let mut delta = CountsDelta::default(); + + for (ngram, &before_count) in before { + let after_count = *after.get(ngram).unwrap_or(&0); + delta.insert(ngram.clone(), after_count as isize - before_count as isize); + } + + for (ngram, &after_count) in after { + if !before.contains_key(ngram) { + delta.insert(ngram.clone(), after_count as isize); + } + } + + delta +} + +/// Convert negative counts to special deletion tokens. +/// For example, if expected delta is {"foo": -1} and actual delta is {"bar": -1}, +/// we convert it to {"¬foo": +1} and {"¬bar": +1}. This way _not_ deleting "foo" +/// will result in a false negative, and mistakenly deleting "bar" will result in a false positive. 
+fn ngram_delta_to_counts(delta: &CountsDelta) -> Counts { + let mut counts = Counts::default(); + + for (ngram, &delta) in delta { + if delta > 0 { + counts.insert(ngram.clone(), delta as usize); + } else { + counts.insert(format!("¬{ngram}"), delta.unsigned_abs()); + } + } + + counts +} + +fn count_ngrams(text: &str, n: usize) -> Counts { + let chars: Vec = text.chars().collect(); + let mut counts = Counts::default(); + + for window in chars.windows(n) { + let ngram: String = window.iter().collect(); + *counts.entry(ngram).or_insert(0) += 1; + } + + counts +} + +#[cfg(test)] +mod test { + use super::*; + use zeta::udiff::DiffLine; + + #[test] + fn test_delta_chr_f_perfect_match() { + let diff = vec![ + DiffLine::Context("fn main() {"), + DiffLine::Deletion(" println!(\"Hello\");"), + DiffLine::Addition(" println!(\"Hello, World!\");"), + DiffLine::Context("}"), + ]; + + let score = delta_chr_f(&diff, &diff); + assert!((score - 100.0).abs() < 1e-2); + } + + #[test] + fn test_delta_chr_f_wrong_edit() { + // When the edit is wrong + let expected = vec![ + DiffLine::Context("one "), + DiffLine::Deletion("two "), + DiffLine::Context("three"), + ]; + + let actual = vec![ + DiffLine::Context("one "), + DiffLine::Context("two "), + DiffLine::Deletion("three"), + DiffLine::Addition("four"), + ]; + + // Then the score should be low + let score = delta_chr_f(&expected, &actual); + assert!(score > 20.0 && score < 40.0); + } + + #[test] + fn test_delta_chr_f_partial_match() { + let expected = vec![ + DiffLine::Deletion("let x = 42;"), + DiffLine::Addition("let x = 100;"), + ]; + + let actual = vec![ + DiffLine::Deletion("let x = 42;"), + DiffLine::Addition("let x = 99;"), + ]; + + // We got the edit location right, but the replacement text is wrong. + // Deleted ngrams will match, bringing the score somewhere in the middle. 
+ let score = delta_chr_f(&expected, &actual); + assert!(score > 40.0 && score < 60.0); + } + + #[test] + fn test_delta_chr_f_missed_edit() { + // When predictions makes no changes + let expected = vec![ + DiffLine::Context("prefix "), + DiffLine::Deletion("old"), + DiffLine::Addition("new"), + DiffLine::Context(" suffix"), + ]; + + let actual = vec![ + DiffLine::Context("prefix "), + DiffLine::Context("old"), + DiffLine::Context(" suffix"), + ]; + + // Then the score should be low (all expected changes are false negatives) + let score = delta_chr_f(&expected, &actual); + assert!(score < 20.0); + } + + #[test] + fn test_delta_chr_f_extra_edit() { + // When adding unexpected content + let expected = vec![DiffLine::Context("hello"), DiffLine::Context("world")]; + + let actual = vec![ + DiffLine::Context("hello"), + DiffLine::Addition("extra"), + DiffLine::Context("world"), + ]; + + // Then the score should be low (all actual changes are false positives) + let score = delta_chr_f(&expected, &actual); + assert!(score < 20.0); + } +} From 8e04706c4d2b5ef454e1c1ffb9d5aac72118b55d Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 27 Nov 2025 10:29:27 +0100 Subject: [PATCH 077/749] editor: Fix panic in wrap_map (#43650) Fixes ZED-3P9 We only clamped the end which for a completely wrong input could cause us to construct a reversed range which will end up underflowing later on. Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- crates/editor/src/display_map/wrap_map.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crates/editor/src/display_map/wrap_map.rs b/crates/editor/src/display_map/wrap_map.rs index 6d9704b5f93c0ce48d413babdd59997b02f093e6..c33bf141b94866744c607e3410f051ae8b6e1fce 100644 --- a/crates/editor/src/display_map/wrap_map.rs +++ b/crates/editor/src/display_map/wrap_map.rs @@ -622,9 +622,10 @@ impl WrapSnapshot { if transforms.item().is_some_and(|t| t.is_isomorphic()) { input_start.0 += output_start.0 - transforms.start().0.0; } - let input_end = self - .to_tab_point(output_end) - .min(self.tab_snapshot.max_point()); + let input_end = self.to_tab_point(output_end); + let max_point = self.tab_snapshot.max_point(); + let input_start = input_start.min(max_point); + let input_end = input_end.min(max_point); WrapChunks { input_chunks: self.tab_snapshot.chunks( input_start..input_end, @@ -921,10 +922,10 @@ impl WrapChunks<'_> { if self.transforms.item().is_some_and(|t| t.is_isomorphic()) { input_start.0 += output_start.0 - self.transforms.start().0.0; } - let input_end = self - .snapshot - .to_tab_point(output_end) - .min(self.snapshot.tab_snapshot.max_point()); + let input_end = self.snapshot.to_tab_point(output_end); + let max_point = self.snapshot.tab_snapshot.max_point(); + let input_start = input_start.min(max_point); + let input_end = input_end.min(max_point); self.input_chunks.seek(input_start..input_end); self.input_chunk = Chunk::default(); self.output_position = output_start; From ab96155d6a968e73f1368a5af45888e70796aab2 Mon Sep 17 00:00:00 2001 From: Dino Date: Thu, 27 Nov 2025 11:00:38 +0000 Subject: [PATCH 078/749] buffer_search: Fix replace buttons not working if search bar is not focused (#43569) Update the way that both `search::buffer_search::BufferSearchBar.replace_next` and `search::buffer_search::BufferSearchBar.replace_all` are registered as listeners, so that we don't require the replacement editor to be focused in 
order for these listeners to be active, only requiring the replacement mode to be active in the buffer search bar. This means that, even if the user is focused on the buffer editor, if the "Replace Next Match" or "Replace All Matches" buttons are clicked, the replacement will be performed. Closes #42471 Release Notes: - Fixed issue with buffer search bar where the replacement buttons ("Replace Next Match" & "Replace All Matches") wouldn't work if search bar was not focused --- crates/search/src/buffer_search.rs | 52 +++++++++++++++++++++++++++--- crates/search/src/search_bar.rs | 2 +- 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/crates/search/src/buffer_search.rs b/crates/search/src/buffer_search.rs index 0b45455faea1c6cd4474ac630d725ee57e1021f4..d17efa635074f7898ab3ea829f3418e2ddd09934 100644 --- a/crates/search/src/buffer_search.rs +++ b/crates/search/src/buffer_search.rs @@ -432,10 +432,8 @@ impl Render for BufferSearchBar { })) .when(replacement, |this| { this.on_action(cx.listener(Self::toggle_replace)) - .when(in_replace, |this| { - this.on_action(cx.listener(Self::replace_next)) - .on_action(cx.listener(Self::replace_all)) - }) + .on_action(cx.listener(Self::replace_next)) + .on_action(cx.listener(Self::replace_all)) }) .when(case, |this| { this.on_action(cx.listener(Self::toggle_case_sensitive)) @@ -2549,6 +2547,52 @@ mod tests { ); } + #[gpui::test] + async fn test_replace_focus(cx: &mut TestAppContext) { + let (editor, search_bar, cx) = init_test(cx); + + editor.update_in(cx, |editor, window, cx| { + editor.set_text("What a bad day!", window, cx) + }); + + search_bar + .update_in(cx, |search_bar, window, cx| { + search_bar.search("bad", None, true, window, cx) + }) + .await + .unwrap(); + + // Calling `toggle_replace` in the search bar ensures that the "Replace + // *" buttons are rendered, so we can then simulate clicking the + // buttons. 
+ search_bar.update_in(cx, |search_bar, window, cx| { + search_bar.toggle_replace(&ToggleReplace, window, cx) + }); + + search_bar.update_in(cx, |search_bar, window, cx| { + search_bar.replacement_editor.update(cx, |editor, cx| { + editor.set_text("great", window, cx); + }); + }); + + // Focus on the editor instead of the search bar, as we want to ensure + // that pressing the "Replace Next Match" button will work, even if the + // search bar is not focused. + cx.focus(&editor); + + // We'll not simulate clicking the "Replace Next Match " button, asserting that + // the replacement was done. + let button_bounds = cx + .debug_bounds("ICON-ReplaceNext") + .expect("'Replace Next Match' button should be visible"); + cx.simulate_click(button_bounds.center(), gpui::Modifiers::none()); + + assert_eq!( + editor.read_with(cx, |editor, cx| editor.text(cx)), + "What a great day!" + ); + } + struct ReplacementTestParams<'a> { editor: &'a Entity, search_bar: &'a Entity, diff --git a/crates/search/src/search_bar.rs b/crates/search/src/search_bar.rs index 61fa46ed9770fbaf49b43979d366655c1b658fc3..13b4df9574aa6b2568dd6db25c6b63551d9b6d03 100644 --- a/crates/search/src/search_bar.rs +++ b/crates/search/src/search_bar.rs @@ -29,7 +29,7 @@ pub(super) fn render_action_button( if !focus_handle.is_focused(window) { window.focus(&focus_handle); } - window.dispatch_action(action.boxed_clone(), cx) + window.dispatch_action(action.boxed_clone(), cx); } }) .tooltip(move |_window, cx| Tooltip::for_action_in(tooltip, action, &focus_handle, cx)) From bbdbfe3430e3c833459defa65fcabf73991bf219 Mon Sep 17 00:00:00 2001 From: Lena <241371603+zelenenka@users.noreply.github.com> Date: Thu, 27 Nov 2025 12:15:57 +0100 Subject: [PATCH 079/749] Auto-label new bugs/crashes with 'needs triage' (#43658) Release Notes: - N/A --- .github/ISSUE_TEMPLATE/1.bug-report.yml | 1 + .github/ISSUE_TEMPLATE/2.crash-report.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug-report.yml 
b/.github/ISSUE_TEMPLATE/1.bug-report.yml index 1fbb81af8e5e6bd8ebc8582c3528f5b88929f041..3fcf81113093be06f9477a0c89af979444fd016d 100644 --- a/.github/ISSUE_TEMPLATE/1.bug-report.yml +++ b/.github/ISSUE_TEMPLATE/1.bug-report.yml @@ -1,6 +1,7 @@ name: Report an issue description: Report an issue with Zed. type: Bug +labels: "state:needs triage" body: - type: markdown attributes: diff --git a/.github/ISSUE_TEMPLATE/2.crash-report.yml b/.github/ISSUE_TEMPLATE/2.crash-report.yml index 47cedcc3100854060c2cbde2147b754d89afef51..1613e59a4b120fe2cf8a2d79bfea2104cb95d855 100644 --- a/.github/ISSUE_TEMPLATE/2.crash-report.yml +++ b/.github/ISSUE_TEMPLATE/2.crash-report.yml @@ -1,6 +1,7 @@ name: Report a crash description: Zed is crashing or freezing or hanging. type: Crash +labels: "state:needs triage" body: - type: textarea attributes: From 007d648f5e9a3cc2df5964435b89d854955c077e Mon Sep 17 00:00:00 2001 From: Ben Brandt Date: Thu, 27 Nov 2025 14:01:41 +0100 Subject: [PATCH 080/749] acp: Add a timeout when initializing an ACP agent so the user isn't waiting forever (#43663) Sometimes we are unable to receive messages at all from an agent. This puts on upper bound on the `initialize` call so we can at least give a message to the user that something is wrong here. 30s might feel like too long, but I wanted to avoid some false positives in case there was something an agent needed to do at startup. This will still communicate to the user at some point that something is wrong, rather than leave them waiting forever with no signal that something is going wrong. Release Notes: - agent: Show an error message to the user if we are unable to initialize an ACP agent in a reasonable amount of time. 
--- crates/agent_ui/src/acp/thread_view.rs | 62 +++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index a2929ad23ba8558b61abbf1d25ffe3843a918c2e..ab662889abebb53d8ce3ef51f6cf8802b3b8eb46 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -498,7 +498,17 @@ impl AcpThreadView { Some(new_version_available_tx), ); - let connect_task = agent.connect(root_dir.as_deref(), delegate, cx); + let agent_name = agent.name(); + let timeout = cx.background_executor().timer(Duration::from_secs(30)); + let connect_task = smol::future::or( + agent.connect(root_dir.as_deref(), delegate, cx), + async move { + timeout.await; + Err(anyhow::Error::new(LoadError::Other( + format!("{agent_name} is unable to initialize after 30 seconds.").into(), + ))) + }, + ); let load_task = cx.spawn_in(window, async move |this, cx| { let connection = match connect_task.await { Ok((connection, login)) => { @@ -7358,4 +7368,54 @@ pub(crate) mod tests { assert_eq!(text, expected_txt); }) } + + #[gpui::test] + async fn test_initialize_timeout(cx: &mut TestAppContext) { + init_test(cx); + + struct InfiniteInitialize; + + impl AgentServer for InfiniteInitialize { + fn telemetry_id(&self) -> &'static str { + "test" + } + + fn logo(&self) -> ui::IconName { + ui::IconName::Ai + } + + fn name(&self) -> SharedString { + "Test".into() + } + + fn connect( + &self, + _root_dir: Option<&Path>, + _delegate: AgentServerDelegate, + cx: &mut App, + ) -> Task, Option)>> + { + cx.spawn(async |_| futures::future::pending().await) + } + + fn into_any(self: Rc) -> Rc { + self + } + } + + let (thread_view, cx) = setup_thread_view(InfiniteInitialize, cx).await; + + cx.executor().advance_clock(Duration::from_secs(31)); + cx.run_until_parked(); + + let error = thread_view.read_with(cx, |thread_view, _| match &thread_view.thread_state { + ThreadState::LoadError(err) 
=> err.clone(), + _ => panic!("Incorrect thread state"), + }); + + match error { + LoadError::Other(str) => assert!(str.contains("initialize")), + _ => panic!("Unexpected load error"), + } + } } From 02fbafcda603bc3b6d63e2a0f778650b44f02436 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 27 Nov 2025 14:46:43 +0100 Subject: [PATCH 081/749] release_version: Do not use prerelease field (#43669) - **release_channel: Do not use prerelease channel for build id** Prerelease channel specifiers always compare as less than to non-prerelease, which led to 2 auto-update bugs fixed in https://github.com/zed-industries/zed/pull/43595 and https://github.com/zed-industries/zed/pull/43611. We'll use a dot-delimited build specifiers in form: release-channel.build_number.sha1 instead - **auto_update: Do not display full build metadata in update notification** Release Notes: - N/A --- Cargo.lock | 1 + crates/auto_update_ui/Cargo.toml | 1 + crates/auto_update_ui/src/auto_update_ui.rs | 4 +++- crates/release_channel/src/lib.rs | 12 ++++++++++-- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f10c2e1d13210d67d16d584637c0fb7b71d61eec..8d4baa2e5221c23ff57a227a94dae4ae3859ec83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1380,6 +1380,7 @@ dependencies = [ "http_client", "markdown_preview", "release_channel", + "semver", "serde", "serde_json", "smol", diff --git a/crates/auto_update_ui/Cargo.toml b/crates/auto_update_ui/Cargo.toml index 0e31f94f5ee268cdc3274dea747bd0b05d9c80eb..2b1421e35dcbcf6fac40cd0e97a3dc839da58d9e 100644 --- a/crates/auto_update_ui/Cargo.toml +++ b/crates/auto_update_ui/Cargo.toml @@ -20,6 +20,7 @@ gpui.workspace = true http_client.workspace = true markdown_preview.workspace = true release_channel.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true smol.workspace = true diff --git a/crates/auto_update_ui/src/auto_update_ui.rs 
b/crates/auto_update_ui/src/auto_update_ui.rs index aeaa6ae93e635a6cab1487400fb58bd7be1bc6e1..6c32ee3b6c9b9c4974a287ff0e9a988472cecf3b 100644 --- a/crates/auto_update_ui/src/auto_update_ui.rs +++ b/crates/auto_update_ui/src/auto_update_ui.rs @@ -148,7 +148,9 @@ pub fn notify_if_app_was_updated(cx: &mut App) { let should_show_notification = should_show_notification.await?; if should_show_notification { cx.update(|cx| { - let version = updater.read(cx).current_version(); + let mut version = updater.read(cx).current_version(); + version.build = semver::BuildMetadata::EMPTY; + version.pre = semver::Prerelease::EMPTY; let app_name = ReleaseChannel::global(cx).display_name(); show_app_notification( NotificationId::unique::(), diff --git a/crates/release_channel/src/lib.rs b/crates/release_channel/src/lib.rs index e84bf91c1db5e891abae0aeb67089cc40b1ec009..65201ccc46caccdf4912b69fa296d468dfdea95d 100644 --- a/crates/release_channel/src/lib.rs +++ b/crates/release_channel/src/lib.rs @@ -90,11 +90,19 @@ impl AppVersion { } else { pkg_version.parse().expect("invalid version in Cargo.toml") }; + let mut pre = String::from(RELEASE_CHANNEL.dev_name()); + if let Some(build_id) = build_id { - version.pre = semver::Prerelease::new(&build_id).expect("Invalid build identifier"); + pre.push('.'); + pre.push_str(&build_id); } + if let Some(sha) = commit_sha { - version.build = semver::BuildMetadata::new(&sha.0).expect("Invalid build metadata"); + pre.push('.'); + pre.push_str(&sha.0); + } + if let Ok(build) = semver::BuildMetadata::new(&pre) { + version.build = build; } version From c2281779af56bd52c829ccd31aae4eb82b682ebc Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 27 Nov 2025 15:08:43 +0100 Subject: [PATCH 082/749] auto_update: Tentatively prevent auto-update loop on Nightly again (#43670) Release Notes: - N/A --- crates/auto_update/src/auto_update.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff 
--git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 06e87c53dba2acfffb03cdc75cbbaa7cc2f44a6a..0c122717d7a377e5aa5e8d23cab4de435bd67e33 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -639,10 +639,11 @@ impl AutoUpdater { if let AutoUpdateStatus::Updated { version, .. } = status { match version { VersionCheckType::Sha(cached_version) => { - let should_download = parsed_fetched_version - .as_ref() - .ok() - .is_none_or(|version| version.build.as_str() != cached_version.full()); + let should_download = + parsed_fetched_version.as_ref().ok().is_none_or(|version| { + version.build.as_str().rsplit('.').next() + != Some(&cached_version.full()) + }); let newer_version = should_download .then(|| VersionCheckType::Sha(AppCommitSha::new(fetched_version))); return Ok(newer_version); @@ -662,10 +663,9 @@ impl AutoUpdater { .ok() .flatten() .map(|sha| { - parsed_fetched_version - .as_ref() - .ok() - .is_none_or(|version| version.build.as_str() != sha) + parsed_fetched_version.as_ref().ok().is_none_or(|version| { + version.build.as_str().rsplit('.').next() != Some(&sha) + }) }) .unwrap_or(true); let newer_version = should_download From 5f0212de5fb76c91c4b8991f1bc8ed1464a2e527 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 27 Nov 2025 13:10:55 -0300 Subject: [PATCH 083/749] Better promote edit prediction when signed out (#43665) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introducing this little popover here that's aimed at better communicating what Zed's built-in edit prediction feature is and how much people can get of it for free by purely just signing in. 
Screenshot 2025-11-27 at 9  50@2x Release Notes: - N/A --- Cargo.lock | 1 + crates/client/src/zed_urls.rs | 8 + crates/edit_prediction_button/Cargo.toml | 1 + .../src/edit_prediction_button.rs | 145 +++++++++++++++++- 4 files changed, 149 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d4baa2e5221c23ff57a227a94dae4ae3859ec83..082f420f11f12968dd5c6bec46c3fab4f2b37a7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5308,6 +5308,7 @@ dependencies = [ "telemetry", "theme", "ui", + "util", "workspace", "zed_actions", "zeta", diff --git a/crates/client/src/zed_urls.rs b/crates/client/src/zed_urls.rs index 957d6c68f773db025b4ee604666f5b3d8101148b..2fe47251695446b54d6766c9a52bbd2da366d34e 100644 --- a/crates/client/src/zed_urls.rs +++ b/crates/client/src/zed_urls.rs @@ -59,3 +59,11 @@ pub fn agent_server_docs(cx: &App) -> String { server_url = server_url(cx) ) } + +/// Returns the URL to Zed's edit prediction documentation. +pub fn edit_prediction_docs(cx: &App) -> String { + format!( + "{server_url}/docs/ai/edit-prediction", + server_url = server_url(cx) + ) +} diff --git a/crates/edit_prediction_button/Cargo.toml b/crates/edit_prediction_button/Cargo.toml index 9062aca3c56f527385aecb000ebcd625f588eb9a..b7ec07e1e2b24d1d1b851913195afdbf58376da5 100644 --- a/crates/edit_prediction_button/Cargo.toml +++ b/crates/edit_prediction_button/Cargo.toml @@ -32,6 +32,7 @@ settings.workspace = true supermaven.workspace = true telemetry.workspace = true ui.workspace = true +util.workspace = true workspace.workspace = true zed_actions.workspace = true zeta.workspace = true diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 254caa698aa05214f73a749e540233952db4978b..ba371b53aebe8c8f2db01501e01391125341a457 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -11,7 +11,7 @@ use fs::Fs; use 
gpui::{ Action, Animation, AnimationExt, App, AsyncWindowContext, Corner, Entity, FocusHandle, Focusable, IntoElement, ParentElement, Render, Subscription, WeakEntity, actions, div, - pulsating_between, + ease_in_out, pulsating_between, }; use indoc::indoc; use language::{ @@ -34,6 +34,7 @@ use ui::{ Clickable, ContextMenu, ContextMenuEntry, DocumentationEdge, DocumentationSide, IconButton, IconButtonShape, Indicator, PopoverMenu, PopoverMenuHandle, ProgressBar, Tooltip, prelude::*, }; +use util::ResultExt as _; use workspace::{ StatusItemView, Toast, Workspace, create_and_open_local_file, item::ItemHandle, notifications::NotificationId, @@ -322,7 +323,7 @@ impl Render for EditPredictionButton { let tooltip_meta = if self.user_store.read(cx).current_user().is_some() { "Choose a Plan" } else { - "Sign In" + "Sign In To Use" }; return div().child( @@ -357,6 +358,7 @@ impl Render for EditPredictionButton { } let show_editor_predictions = self.editor_show_predictions; + let user = self.user_store.read(cx).current_user(); let icon_button = IconButton::new("zed-predict-pending-button", zeta_icon) .shape(IconButtonShape::Square) @@ -372,10 +374,18 @@ impl Render for EditPredictionButton { }, ) .when(!self.popover_menu_handle.is_deployed(), |element| { + let user = user.clone(); element.tooltip(move |_window, cx| { if enabled { if show_editor_predictions { Tooltip::for_action("Edit Prediction", &ToggleMenu, cx) + } else if user.is_none() { + Tooltip::with_meta( + "Edit Prediction", + Some(&ToggleMenu), + "Sign In To Use", + cx, + ) } else { Tooltip::with_meta( "Edit Prediction", @@ -398,11 +408,25 @@ impl Render for EditPredictionButton { let this = cx.weak_entity(); let mut popover_menu = PopoverMenu::new("zeta") - .menu(move |window, cx| { - this.update(cx, |this, cx| { - this.build_zeta_context_menu(provider, window, cx) + .when(user.is_some(), |popover_menu| { + let this = this.clone(); + + popover_menu.menu(move |window, cx| { + this.update(cx, |this, cx| { + 
this.build_zeta_context_menu(provider, window, cx) + }) + .ok() + }) + }) + .when(user.is_none(), |popover_menu| { + let this = this.clone(); + + popover_menu.menu(move |window, cx| { + this.update(cx, |this, cx| { + this.build_zeta_upsell_context_menu(window, cx) + }) + .ok() }) - .ok() }) .anchor(Corner::BottomRight) .with_handle(self.popover_menu_handle.clone()); @@ -1045,6 +1069,55 @@ impl EditPredictionButton { }) } + fn build_zeta_upsell_context_menu( + &self, + window: &mut Window, + cx: &mut Context, + ) -> Entity { + ContextMenu::build(window, cx, |mut menu, _window, cx| { + menu = menu + .custom_row(move |_window, cx| { + let description = indoc! { + "Sign in for 2,000 worth of accepted suggestions at every keystroke, \ + powered by Zeta, our open-source, open-data model." + }; + + v_flex() + .max_w_64() + .h(rems_from_px(148.)) + .child(render_zeta_tab_animation(cx)) + .child(Label::new("Edit Prediction")) + .child( + Label::new(description) + .color(Color::Muted) + .size(LabelSize::Small), + ) + .into_any_element() + }) + .separator() + .entry("Sign In & Start Using", None, |window, cx| { + let client = Client::global(cx); + window + .spawn(cx, async move |cx| { + client + .sign_in_with_optional_connect(true, &cx) + .await + .log_err(); + }) + .detach(); + }) + .link( + "Learn More", + OpenBrowser { + url: zed_urls::edit_prediction_docs(cx), + } + .boxed_clone(), + ); + + menu + }) + } + pub fn update_enabled(&mut self, editor: Entity, cx: &mut Context) { let editor = editor.read(cx); let snapshot = editor.buffer().read(cx).snapshot(cx); @@ -1248,6 +1321,66 @@ fn toggle_edit_prediction_mode(fs: Arc, mode: EditPredictionsMode, cx: & } } +fn render_zeta_tab_animation(cx: &App) -> impl IntoElement { + let tab = |n: u64, inverted: bool| { + let text_color = cx.theme().colors().text; + + h_flex().child( + h_flex() + .text_size(TextSize::XSmall.rems(cx)) + .text_color(text_color) + .child("tab") + .with_animation( + ElementId::Integer(n), + 
Animation::new(Duration::from_secs(4)).repeat(), + move |tab, delta| { + let n_f32 = n as f32; + + let delta = if inverted { + (delta - 0.15 * (5.0 - n_f32)) / 0.7 + } else { + (delta - 0.15 * n_f32) / 0.7 + }; + + let delta = 1.0 - (0.5 - delta).abs() * 2.; + let delta = ease_in_out(delta.clamp(0., 1.)); + let delta = 0.1 + 0.5 * delta; + + tab.text_color(text_color.opacity(delta)) + }, + ), + ) + }; + + let tab_sequence = |inverted: bool| { + h_flex() + .gap_1() + .child(tab(0, inverted)) + .child(tab(1, inverted)) + .child(tab(2, inverted)) + .child(tab(3, inverted)) + .child(tab(4, inverted)) + }; + + h_flex() + .my_1p5() + .p_4() + .justify_center() + .gap_2() + .rounded_xs() + .border_1() + .border_dashed() + .border_color(cx.theme().colors().border) + .bg(gpui::pattern_slash( + cx.theme().colors().border.opacity(0.5), + 1., + 8., + )) + .child(tab_sequence(true)) + .child(Icon::new(IconName::ZedPredict)) + .child(tab_sequence(false)) +} + fn copilot_settings_url(enterprise_uri: Option<&str>) -> String { match enterprise_uri { Some(uri) => { From aa899f6d785826446f105c18f94e863f8c2e2b94 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 27 Nov 2025 18:14:42 +0100 Subject: [PATCH 084/749] gpui: Give windows message loop processing chances when we overload the main thread with tasks (#43678) This reduces hangs on windows when we have many tasks queued up on the main thread that yield a lot. 
Release Notes: - Reduced hangs on windows in some situations --- .../gpui/src/platform/windows/dispatcher.rs | 2 +- crates/gpui/src/platform/windows/platform.rs | 23 +++++++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/crates/gpui/src/platform/windows/dispatcher.rs b/crates/gpui/src/platform/windows/dispatcher.rs index f543202e77c75acfd007d632b2023317dcba43b6..dd53c86f5ed687c9b22a08779f262392f44a66ce 100644 --- a/crates/gpui/src/platform/windows/dispatcher.rs +++ b/crates/gpui/src/platform/windows/dispatcher.rs @@ -25,7 +25,7 @@ pub(crate) struct WindowsDispatcher { pub(crate) wake_posted: AtomicBool, main_sender: Sender, main_thread_id: ThreadId, - platform_window_handle: SafeHwnd, + pub(crate) platform_window_handle: SafeHwnd, validation_number: usize, } diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 942cb62d2216c8d7cd5ea4cf75c4e4fa4a7d007f..548b08eca8f254825bc5a42bf74089c0f230c4ac 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -813,9 +813,28 @@ impl WindowsPlatformInner { #[inline] fn run_foreground_task(&self) -> Option { + const MAIN_TASK_TIMEOUT: u128 = 10; + + let start = std::time::Instant::now(); loop { - for runnable in self.main_receiver.drain() { - WindowsDispatcher::execute_runnable(runnable); + loop { + if start.elapsed().as_millis() >= MAIN_TASK_TIMEOUT { + // requeue main thread dispatch and bail, allowing more system messages to be processed + unsafe { + PostMessageW( + Some(self.dispatcher.platform_window_handle.as_raw()), + WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, + WPARAM(self.validation_number), + LPARAM(0), + ) + .log_err(); + } + return Some(0); + } + match self.main_receiver.try_recv() { + Err(_) => break, + Ok(runnable) => WindowsDispatcher::execute_runnable(runnable), + } } // Someone could enqueue a Runnable here. The flag is still true, so they will not PostMessage. 
From d82be979630a662677d09f5f765c3acf40aef3fd Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Thu, 27 Nov 2025 18:47:12 +0100 Subject: [PATCH 085/749] acp: Support using @mentions after typing slash command (#43681) Release Notes: - acp: Allow using @mentions after typing slash command --- crates/agent_ui/src/completion_provider.rs | 62 ++++++++++++++++------ 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/crates/agent_ui/src/completion_provider.rs b/crates/agent_ui/src/completion_provider.rs index 61ce313cb0c0c6ed91a08aa07544e766de5c581a..ec5d9c045acfe856409715770cb4c55f7a3e0e92 100644 --- a/crates/agent_ui/src/completion_provider.rs +++ b/crates/agent_ui/src/completion_provider.rs @@ -861,7 +861,7 @@ impl CompletionProvider for PromptCompletio let offset_to_line = buffer.point_to_offset(line_start); let mut lines = buffer.text_for_range(line_start..position).lines(); let line = lines.next()?; - ContextCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) + PromptCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) }); let Some(state) = state else { return Task::ready(Ok(Vec::new())); @@ -880,7 +880,7 @@ impl CompletionProvider for PromptCompletio let editor = self.editor.clone(); let mention_set = self.mention_set.downgrade(); match state { - ContextCompletion::SlashCommand(SlashCommandCompletion { + PromptCompletion::SlashCommand(SlashCommandCompletion { command, argument, .. }) => { let search_task = self.search_slash_commands(command.unwrap_or_default(), cx); @@ -943,7 +943,7 @@ impl CompletionProvider for PromptCompletio }]) }) } - ContextCompletion::Mention(MentionCompletion { mode, argument, .. }) => { + PromptCompletion::Mention(MentionCompletion { mode, argument, .. 
}) => { let query = argument.unwrap_or_default(); let search_task = self.search_mentions(mode, query, Arc::::default(), cx); @@ -1085,12 +1085,12 @@ impl CompletionProvider for PromptCompletio let offset_to_line = buffer.point_to_offset(line_start); let mut lines = buffer.text_for_range(line_start..position).lines(); if let Some(line) = lines.next() { - ContextCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) + PromptCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) .filter(|completion| { // Right now we don't support completing arguments of slash commands let is_slash_command_with_argument = matches!( completion, - ContextCompletion::SlashCommand(SlashCommandCompletion { + PromptCompletion::SlashCommand(SlashCommandCompletion { argument: Some(_), .. }) @@ -1160,12 +1160,13 @@ fn confirm_completion_callback( }) } -enum ContextCompletion { +#[derive(Debug, PartialEq)] +enum PromptCompletion { SlashCommand(SlashCommandCompletion), Mention(MentionCompletion), } -impl ContextCompletion { +impl PromptCompletion { fn source_range(&self) -> Range { match self { Self::SlashCommand(completion) => completion.source_range.clone(), @@ -1178,15 +1179,14 @@ impl ContextCompletion { offset_to_line: usize, supported_modes: &[PromptContextType], ) -> Option { - if let Some(command) = SlashCommandCompletion::try_parse(line, offset_to_line) { - Some(Self::SlashCommand(command)) - } else if let Some(mention) = - MentionCompletion::try_parse(line, offset_to_line, supported_modes) - { - Some(Self::Mention(mention)) - } else { - None + if line.contains('@') { + if let Some(mention) = + MentionCompletion::try_parse(line, offset_to_line, supported_modes) + { + return Some(Self::Mention(mention)); + } } + SlashCommandCompletion::try_parse(line, offset_to_line).map(Self::SlashCommand) } } @@ -1653,6 +1653,38 @@ fn selection_ranges( mod tests { use super::*; + #[test] + fn test_prompt_completion_parse() { + let supported_modes = 
vec![PromptContextType::File, PromptContextType::Symbol]; + + assert_eq!( + PromptCompletion::try_parse("/", 0, &supported_modes), + Some(PromptCompletion::SlashCommand(SlashCommandCompletion { + source_range: 0..1, + command: None, + argument: None, + })) + ); + + assert_eq!( + PromptCompletion::try_parse("@", 0, &supported_modes), + Some(PromptCompletion::Mention(MentionCompletion { + source_range: 0..1, + mode: None, + argument: None, + })) + ); + + assert_eq!( + PromptCompletion::try_parse("/test @file", 0, &supported_modes), + Some(PromptCompletion::Mention(MentionCompletion { + source_range: 6..11, + mode: Some(PromptContextType::File), + argument: None, + })) + ); + } + #[test] fn test_slash_command_completion_parse() { assert_eq!( From 64633bade4cac6351417a2147effa5c4ace9c9ba Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 27 Nov 2025 19:51:19 +0100 Subject: [PATCH 086/749] gpui(windows): Prioritize system messages when exceeding main thread task budget (#43682) Release Notes: - Improved responsiveness on windows when there is a lot of tasks running in the foreground --- crates/gpui/src/platform/windows/platform.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 548b08eca8f254825bc5a42bf74089c0f230c4ac..14b9511581a55a7654c8a48fc8f383c560c1a0ee 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -819,17 +819,16 @@ impl WindowsPlatformInner { loop { loop { if start.elapsed().as_millis() >= MAIN_TASK_TIMEOUT { - // requeue main thread dispatch and bail, allowing more system messages to be processed - unsafe { - PostMessageW( - Some(self.dispatcher.platform_window_handle.as_raw()), - WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, - WPARAM(self.validation_number), - LPARAM(0), - ) - .log_err(); + // we spent our budget on gpui tasks, we likely have a lot of work queued 
so drain system events first + // before returning to main thread task work + let mut msg = MSG::default(); + let peek_msg_type = PM_REMOVE | PM_QS_INPUT | PM_QS_PAINT; + while unsafe { PeekMessageW(&mut msg, None, 0, 0, peek_msg_type) }.as_bool() { + if translate_accelerator(&msg).is_none() { + _ = unsafe { TranslateMessage(&msg) }; + unsafe { DispatchMessageW(&msg) }; + } } - return Some(0); } match self.main_receiver.try_recv() { Err(_) => break, From d97d4f3949bedaf08e02c30bae9b0f39e6f4353b Mon Sep 17 00:00:00 2001 From: Xiaobo Liu Date: Fri, 28 Nov 2025 03:19:54 +0800 Subject: [PATCH 087/749] ui: Remove early return after painting existing menu (#43631) Release Notes: - Fixed right-click context menu UX issues Signed-off-by: Xiaobo Liu --- crates/ui/src/components/right_click_menu.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/ui/src/components/right_click_menu.rs b/crates/ui/src/components/right_click_menu.rs index 761189671b935bf1f3d9e3f7d4d547528cf20196..dff423073710121bb0bc0fafdb8ab3108b746bde 100644 --- a/crates/ui/src/components/right_click_menu.rs +++ b/crates/ui/src/components/right_click_menu.rs @@ -223,7 +223,6 @@ impl Element for RightClickMenu { if let Some(mut menu) = request_layout.menu_element.take() { menu.paint(window, cx); - return; } let Some(builder) = this.menu_builder.take() else { From fa070c50e57e7de06b45336578f0a6f36d9c0ec8 Mon Sep 17 00:00:00 2001 From: Xiaobo Liu Date: Fri, 28 Nov 2025 03:33:48 +0800 Subject: [PATCH 088/749] editor: Show "Toggle Excerpt Unfold" tooltip when buffer is fold (#43626) Release Notes: - N/A --------- Signed-off-by: Xiaobo Liu Co-authored-by: Danilo Leal --- crates/editor/src/element.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index c85528353fc23ac2da4cca3682e28a30cda37f9c..dab04e8626385ada879485acecb93dddbcb4b940 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -3969,9 
+3969,14 @@ impl EditorElement { .children(toggle_chevron_icon) .tooltip({ let focus_handle = focus_handle.clone(); + let is_folded_for_tooltip = is_folded; move |_window, cx| { Tooltip::with_meta_in( - "Toggle Excerpt Fold", + if is_folded_for_tooltip { + "Unfold Excerpt" + } else { + "Fold Excerpt" + }, Some(&ToggleFold), format!( "{} to toggle all", From 45285ee3452463bd08e780481256ba8180ab9c96 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 27 Nov 2025 16:38:52 -0300 Subject: [PATCH 089/749] settings ui: Fix debugger step granularity setting (#43686) Closes https://github.com/zed-industries/zed/issues/43549 Release Notes: - N/A --- crates/settings_ui/src/page_data.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index 7cc5705ced9bc2267834044eff5e5def78182bc4..e06a98da49a3b0b60f704debe862ddf2bd4dd97e 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -4442,7 +4442,7 @@ pub(crate) fn settings_data(cx: &App) -> Vec { title: "Stepping Granularity", description: "Determines the stepping granularity for debug operations.", field: Box::new(SettingField { - json_path: Some("agent.default_height"), + json_path: Some("debugger.stepping_granularity"), pick: |settings_content| { settings_content .debugger From f10afd105921ce0449833eac28e7d351188277a1 Mon Sep 17 00:00:00 2001 From: Floyd Wang Date: Fri, 28 Nov 2025 04:02:03 +0800 Subject: [PATCH 090/749] component_preview: Pin the filter input at the top (#43636) When I scroll the page after selecting a component, the filter input disappears. It would be more helpful if it stayed fixed at the top. 
## Before https://github.com/user-attachments/assets/e26031f1-d6e7-4ae2-a148-88f4c548a5cf ## After https://github.com/user-attachments/assets/f120355c-74d3-4724-9ffc-71fb7e3a4586 Release Notes: - N/A --- crates/zed/src/zed/component_preview.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/crates/zed/src/zed/component_preview.rs b/crates/zed/src/zed/component_preview.rs index c231836aaa9219cab2ed913db70ad1704606dfd1..14a46d8882d1d3d371c50e9886062a124917a48d 100644 --- a/crates/zed/src/zed/component_preview.rs +++ b/crates/zed/src/zed/component_preview.rs @@ -653,10 +653,8 @@ impl Render for ComponentPreview { ) .child( v_flex() - .id("content-area") .flex_1() .size_full() - .overflow_y_scroll() .child( div() .p_2() @@ -665,14 +663,18 @@ impl Render for ComponentPreview { .border_color(cx.theme().colors().border) .child(self.filter_editor.clone()), ) - .child(match active_page { - PreviewPage::AllComponents => { - self.render_all_components(cx).into_any_element() - } - PreviewPage::Component(id) => self - .render_component_page(&id, window, cx) - .into_any_element(), - }), + .child( + div().id("content-area").flex_1().overflow_y_scroll().child( + match active_page { + PreviewPage::AllComponents => { + self.render_all_components(cx).into_any_element() + } + PreviewPage::Component(id) => self + .render_component_page(&id, window, cx) + .into_any_element(), + }, + ), + ), ) } } From 28dde14a33ee7b2a9858dfbb41b350e90118e4f3 Mon Sep 17 00:00:00 2001 From: Floyd Wang Date: Fri, 28 Nov 2025 04:09:19 +0800 Subject: [PATCH 091/749] ui: Fix custom size preview example of vector component (#43633) In the vector component custom size preview example, the image will overflow the content box. 
| Before | After | | - | - | | Before | After | Release Notes: - N/A --------- Co-authored-by: Danilo Leal --- crates/ui/src/components/image.rs | 33 +++++++++++++++++-------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/crates/ui/src/components/image.rs b/crates/ui/src/components/image.rs index 8a14cffd3b2de2e184fd87a9212775c470e3118d..3e8cbd8fff7ba484b1b1eb9fd50da55998d7eae9 100644 --- a/crates/ui/src/components/image.rs +++ b/crates/ui/src/components/image.rs @@ -115,6 +115,8 @@ impl Component for Vector { } fn preview(_window: &mut Window, _cx: &mut App) -> Option { + let size = rems_from_px(60.); + Some( v_flex() .gap_6() @@ -124,11 +126,18 @@ impl Component for Vector { vec![ single_example( "Default", - Vector::square(VectorName::ZedLogo, rems(8.)).into_any_element(), + Vector::square(VectorName::ZedLogo, size).into_any_element(), ), single_example( "Custom Size", - Vector::new(VectorName::ZedLogo, rems(12.), rems(6.)) + h_flex() + .h(rems_from_px(120.)) + .justify_center() + .child(Vector::new( + VectorName::ZedLogo, + rems_from_px(120.), + rems_from_px(200.), + )) .into_any_element(), ), ], @@ -138,13 +147,13 @@ impl Component for Vector { vec![ single_example( "Accent Color", - Vector::square(VectorName::ZedLogo, rems(8.)) + Vector::square(VectorName::ZedLogo, size) .color(Color::Accent) .into_any_element(), ), single_example( "Error Color", - Vector::square(VectorName::ZedLogo, rems(8.)) + Vector::square(VectorName::ZedLogo, size) .color(Color::Error) .into_any_element(), ), @@ -152,17 +161,11 @@ impl Component for Vector { ), example_group_with_title( "Different Vectors", - vec![ - single_example( - "Zed Logo", - Vector::square(VectorName::ZedLogo, rems(8.)).into_any_element(), - ), - single_example( - "Zed X Copilot", - Vector::square(VectorName::ZedXCopilot, rems(8.)) - .into_any_element(), - ), - ], + vec![single_example( + "Zed X Copilot", + Vector::square(VectorName::ZedXCopilot, rems_from_px(100.)) + .into_any_element(), + 
)], ), ]) .into_any_element(), From 20b584398e6151f0950f18a2579b4ee0b4012c96 Mon Sep 17 00:00:00 2001 From: Remco Smits Date: Thu, 27 Nov 2025 21:12:03 +0100 Subject: [PATCH 092/749] checkbox: Fix showing cursor pointer for edge of the checkbox when it's disabled (#43577) This PR fixes that you saw the cursor pointer for disabled checkbox. This is only for the edge of the checkbox component, because we didn't check for the disabled case. **Before** https://github.com/user-attachments/assets/cfebad01-533b-4515-b8d9-4bcb839eaec4 **After** https://github.com/user-attachments/assets/969600de-081b-42df-a288-ca3db5758d12 Release Notes: - N/A --- crates/ui/src/components/toggle.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/ui/src/components/toggle.rs b/crates/ui/src/components/toggle.rs index a41dce6c61de1cabdbccee1478afe143feee4987..1e637f24194b86f0c06aef806975df635f45a6cd 100644 --- a/crates/ui/src/components/toggle.rs +++ b/crates/ui/src/components/toggle.rs @@ -247,7 +247,13 @@ impl RenderOnce for Checkbox { h_flex() .id(self.id) - .cursor_pointer() + .map(|this| { + if self.disabled { + this.cursor_not_allowed() + } else { + this.cursor_pointer() + } + }) .gap(DynamicSpacing::Base06.rems(cx)) .child(checkbox) .when_some( From 518ea716eeef2fd639f6349e84aad94b0e71dafb Mon Sep 17 00:00:00 2001 From: Scott Churchley Date: Thu, 27 Nov 2025 15:26:00 -0500 Subject: [PATCH 093/749] agent_ui: Truncate file path in context completions (#42682) Closes https://github.com/zed-industries/zed/issues/38753 Followed some prior art here: [a similar calculation ](https://github.com/zed-industries/zed/blob/e80b490ac00bfb5c206d3eb3f5c2dec10e742678/crates/file_finder/src/file_finder.rs#L1105) in `file_finder.rs` Release Notes: - improved visibility of long path names in context completions by truncating on the left when space is insufficient to render the full path Before: Screenshot of overflowing file paths After: Screenshot of truncated file paths 
--- crates/agent_ui/src/completion_provider.rs | 55 ++++++++++++++++++++-- crates/editor/src/code_context_menus.rs | 45 ++++++++++-------- 2 files changed, 75 insertions(+), 25 deletions(-) diff --git a/crates/agent_ui/src/completion_provider.rs b/crates/agent_ui/src/completion_provider.rs index ec5d9c045acfe856409715770cb4c55f7a3e0e92..2e3cf0d551fc649e61ae26e47fa53301def2aacc 100644 --- a/crates/agent_ui/src/completion_provider.rs +++ b/crates/agent_ui/src/completion_provider.rs @@ -7,7 +7,9 @@ use std::sync::atomic::AtomicBool; use acp_thread::MentionUri; use agent::{HistoryEntry, HistoryStore}; use anyhow::Result; -use editor::{CompletionProvider, Editor, ExcerptId}; +use editor::{ + CompletionProvider, Editor, ExcerptId, code_context_menus::COMPLETION_MENU_MAX_WIDTH, +}; use fuzzy::{PathMatch, StringMatch, StringMatchCandidate}; use gpui::{App, Entity, Task, WeakEntity}; use language::{Buffer, CodeLabel, CodeLabelBuilder, HighlightId}; @@ -25,6 +27,7 @@ use ui::prelude::*; use util::ResultExt as _; use util::paths::PathStyle; use util::rel_path::RelPath; +use util::truncate_and_remove_front; use workspace::Workspace; use crate::AgentPanel; @@ -336,14 +339,20 @@ impl PromptCompletionProvider { mention_set: WeakEntity, workspace: Entity, project: Entity, + label_max_chars: usize, cx: &mut App, ) -> Option { let path_style = project.read(cx).path_style(cx); let (file_name, directory) = extract_file_name_and_directory(&project_path.path, path_prefix, path_style); - let label = - build_code_label_for_path(&file_name, directory.as_ref().map(|s| s.as_ref()), None, cx); + let label = build_code_label_for_path( + &file_name, + directory.as_ref().map(|s| s.as_ref()), + None, + label_max_chars, + cx, + ); let abs_path = project.read(cx).absolute_path(&project_path, cx)?; @@ -392,6 +401,7 @@ impl PromptCompletionProvider { editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, + label_max_chars: usize, cx: &mut App, ) -> Option { let project = 
workspace.read(cx).project().clone(); @@ -414,6 +424,7 @@ impl PromptCompletionProvider { &symbol.name, Some(&file_name), Some(symbol.range.start.0.row + 1), + label_max_chars, cx, ); @@ -852,7 +863,7 @@ impl CompletionProvider for PromptCompletio buffer: &Entity, buffer_position: Anchor, _trigger: CompletionContext, - _window: &mut Window, + window: &mut Window, cx: &mut Context, ) -> Task>> { let state = buffer.update(cx, |buffer, cx| { @@ -948,6 +959,31 @@ impl CompletionProvider for PromptCompletio let search_task = self.search_mentions(mode, query, Arc::::default(), cx); + // Calculate maximum characters available for the full label (file_name + space + directory) + // based on maximum menu width after accounting for padding, spacing, and icon width + let label_max_chars = { + // Base06 left padding + Base06 gap + Base06 right padding + icon width + let used_pixels = DynamicSpacing::Base06.px(cx) * 3.0 + + IconSize::XSmall.rems() * window.rem_size(); + + let style = window.text_style(); + let font_id = window.text_system().resolve_font(&style.font()); + let font_size = TextSize::Small.rems(cx).to_pixels(window.rem_size()); + + // Fallback em_width of 10px matches file_finder.rs fallback for TextSize::Small + let em_width = cx + .text_system() + .em_width(font_id, font_size) + .unwrap_or(px(10.0)); + + // Calculate available pixels for text (file_name + directory) + // Using max width since dynamic_width allows the menu to expand up to this + let available_pixels = COMPLETION_MENU_MAX_WIDTH - used_pixels; + + // Convert to character count (total available for file_name + directory) + (f32::from(available_pixels) / f32::from(em_width)) as usize + }; + cx.spawn(async move |_, cx| { let matches = search_task.await; @@ -984,6 +1020,7 @@ impl CompletionProvider for PromptCompletio mention_set.clone(), workspace.clone(), project.clone(), + label_max_chars, cx, ) } @@ -996,6 +1033,7 @@ impl CompletionProvider for PromptCompletio editor.clone(), mention_set.clone(), 
workspace.clone(), + label_max_chars, cx, ) } @@ -1595,6 +1633,7 @@ fn build_code_label_for_path( file: &str, directory: Option<&str>, line_number: Option, + label_max_chars: usize, cx: &App, ) -> CodeLabel { let variable_highlight_id = cx @@ -1608,7 +1647,13 @@ fn build_code_label_for_path( label.push_str(" ", None); if let Some(directory) = directory { - label.push_str(directory, variable_highlight_id); + let file_name_chars = file.chars().count(); + // Account for: file_name + space (ellipsis is handled by truncate_and_remove_front) + let directory_max_chars = label_max_chars + .saturating_sub(file_name_chars) + .saturating_sub(1); + let truncated_directory = truncate_and_remove_front(directory, directory_max_chars.max(5)); + label.push_str(&truncated_directory, variable_highlight_id); } if let Some(line_number) = line_number { label.push_str(&format!(" L{}", line_number), variable_highlight_id); diff --git a/crates/editor/src/code_context_menus.rs b/crates/editor/src/code_context_menus.rs index 6a07d39210773476b5f88764c5a21f292da48676..efecb70b59746b51db713b4a13153033d0dd3b10 100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -49,6 +49,8 @@ pub const MENU_GAP: Pixels = px(4.); pub const MENU_ASIDE_X_PADDING: Pixels = px(16.); pub const MENU_ASIDE_MIN_WIDTH: Pixels = px(260.); pub const MENU_ASIDE_MAX_WIDTH: Pixels = px(500.); +pub const COMPLETION_MENU_MIN_WIDTH: Pixels = px(280.); +pub const COMPLETION_MENU_MAX_WIDTH: Pixels = px(540.); // Constants for the markdown cache. The purpose of this cache is to reduce flickering due to // documentation not yet being parsed. 
@@ -907,26 +909,29 @@ impl CompletionsMenu { }) }); - div().min_w(px(280.)).max_w(px(540.)).child( - ListItem::new(mat.candidate_id) - .inset(true) - .toggle_state(item_ix == selected_item) - .on_click(cx.listener(move |editor, _event, window, cx| { - cx.stop_propagation(); - if let Some(task) = editor.confirm_completion( - &ConfirmCompletion { - item_ix: Some(item_ix), - }, - window, - cx, - ) { - task.detach_and_log_err(cx) - } - })) - .start_slot::(start_slot) - .child(h_flex().overflow_hidden().child(completion_label)) - .end_slot::